/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_NUM_SW_GFX_RINGS  2
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");

#define mmTCP_CHAN_STEER_0_ARCT								0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_1_ARCT								0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_2_ARCT								0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_3_ARCT								0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_4_ARCT								0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX						0

#define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1

static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
	/* packet headers: each *_HEADER_DUMP read returns the next entry of
	 * the header dump FIFO, hence the repeated entries below
	 */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
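	/* each read of CP_MEC_ME1_HEADER_DUMP pops the next header FIFO
	 * entry, hence the eight consecutive entries below
	 */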
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};

enum ta_ras_gfx_subblock {
	/*CPC*/
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF*/
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG*/
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS*/
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI*/
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges)*/
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0*/
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1*/
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2*/
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA*/
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA*/
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges)*/
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0*/
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1*/
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2*/
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3*/
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4*/
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI*/
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP*/
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD*/
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges)*/
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0*/
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1*/
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2*/
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank*/
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker*/
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

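/*
 * Build one ras_gfx_subblock table entry: the subblock name and its TA enum
 * index, with (a..d) packed into the hw_supported_error_type bitmask and
 * (e..h) packed into the sw_supported_error_type bitmask.
 */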
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                 \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
					      unsigned int vmid);
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

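/*
 * Issue a SET_RESOURCES packet on the KIQ ring to hand the compute queue
 * mask and the cleaner shader address to the CP firmware.
 */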
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask: 0, queue_type: 0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

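/* Ask the KIQ to map a queue's MQD so the hardware can schedule it. */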
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

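/* Ask the KIQ to unmap (or preempt) a previously mapped hardware queue. */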
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

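/*
 * Query a queue's status through the KIQ; the CP writes 'seq' to 'addr'
 * once the query completes.
 */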
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

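/* Flush the VM TLBs for the given PASID through the KIQ. */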
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

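/*
 * Forcibly reset a hung hardware queue: select it via GRBM, request a
 * dequeue plus an SPI queue reset, and poll until the HQD goes inactive.
 */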
static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue request takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for hqd to deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}

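/* Program the per-ASIC "golden" register settings for the GC block. */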
1096 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
1097 {
1098 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1099 	case IP_VERSION(9, 0, 1):
1100 		soc15_program_register_sequence(adev,
1101 						golden_settings_gc_9_0,
1102 						ARRAY_SIZE(golden_settings_gc_9_0));
1103 		soc15_program_register_sequence(adev,
1104 						golden_settings_gc_9_0_vg10,
1105 						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
1106 		break;
1107 	case IP_VERSION(9, 2, 1):
1108 		soc15_program_register_sequence(adev,
1109 						golden_settings_gc_9_2_1,
1110 						ARRAY_SIZE(golden_settings_gc_9_2_1));
1111 		soc15_program_register_sequence(adev,
1112 						golden_settings_gc_9_2_1_vg12,
1113 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
1114 		break;
1115 	case IP_VERSION(9, 4, 0):
1116 		soc15_program_register_sequence(adev,
1117 						golden_settings_gc_9_0,
1118 						ARRAY_SIZE(golden_settings_gc_9_0));
1119 		soc15_program_register_sequence(adev,
1120 						golden_settings_gc_9_0_vg20,
1121 						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
1122 		break;
1123 	case IP_VERSION(9, 4, 1):
1124 		soc15_program_register_sequence(adev,
1125 						golden_settings_gc_9_4_1_arct,
1126 						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
1127 		break;
1128 	case IP_VERSION(9, 2, 2):
1129 	case IP_VERSION(9, 1, 0):
1130 		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
1131 						ARRAY_SIZE(golden_settings_gc_9_1));
1132 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1133 			soc15_program_register_sequence(adev,
1134 							golden_settings_gc_9_1_rv2,
1135 							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
1136 		else
1137 			soc15_program_register_sequence(adev,
1138 							golden_settings_gc_9_1_rv1,
1139 							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
1140 		break;
	case IP_VERSION(9, 3, 0):
1142 		soc15_program_register_sequence(adev,
1143 						golden_settings_gc_9_1_rn,
1144 						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* Renoir does not need the common golden settings */
1146 	case IP_VERSION(9, 4, 2):
1147 		gfx_v9_4_2_init_golden_registers(adev,
1148 						 adev->smuio.funcs->get_die_id(adev));
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1155 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))
1156 		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
1157 						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
1158 }
1159 
1160 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1161 				       bool wc, uint32_t reg, uint32_t val)
1162 {
1163 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1164 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1165 				WRITE_DATA_DST_SEL(0) |
1166 				(wc ? WR_CONFIRM : 0));
1167 	amdgpu_ring_write(ring, reg);
1168 	amdgpu_ring_write(ring, 0);
1169 	amdgpu_ring_write(ring, val);
1170 }
1171 
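/*
 * Emit a WAIT_REG_MEM packet: stall the selected engine until the
 * value read from memory (@addr0/@addr1 = lo/hi address) or from the
 * register (@addr0), ANDed with @mask, equals @ref. @inv sets the
 * poll interval.
 */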
1172 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1173 				  int mem_space, int opt, uint32_t addr0,
1174 				  uint32_t addr1, uint32_t ref, uint32_t mask,
1175 				  uint32_t inv)
1176 {
1177 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1178 	amdgpu_ring_write(ring,
1179 				 /* memory (1) or register (0) */
1180 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1181 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
1182 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1183 				 WAIT_REG_MEM_ENGINE(eng_sel)));
1184 
1185 	if (mem_space)
1186 		BUG_ON(addr0 & 0x3); /* Dword align */
1187 	amdgpu_ring_write(ring, addr0);
1188 	amdgpu_ring_write(ring, addr1);
1189 	amdgpu_ring_write(ring, ref);
1190 	amdgpu_ring_write(ring, mask);
1191 	amdgpu_ring_write(ring, inv); /* poll interval */
1192 }
1193 
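/*
 * Basic ring sanity test: seed SCRATCH_REG0 with a dummy value over
 * MMIO, submit a SET_UCONFIG_REG packet that rewrites it, then poll
 * the register until the CP has consumed the ring contents, or
 * return -ETIMEDOUT after usec_timeout.
 */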
1194 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1195 {
1196 	struct amdgpu_device *adev = ring->adev;
1197 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1198 	uint32_t tmp = 0;
1199 	unsigned i;
1200 	int r;
1201 
1202 	WREG32(scratch, 0xCAFEDEAD);
1203 	r = amdgpu_ring_alloc(ring, 3);
1204 	if (r)
1205 		return r;
1206 
1207 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1208 	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
1209 	amdgpu_ring_write(ring, 0xDEADBEEF);
1210 	amdgpu_ring_commit(ring);
1211 
1212 	for (i = 0; i < adev->usec_timeout; i++) {
1213 		tmp = RREG32(scratch);
1214 		if (tmp == 0xDEADBEEF)
1215 			break;
1216 		udelay(1);
1217 	}
1218 
1219 	if (i >= adev->usec_timeout)
1220 		r = -ETIMEDOUT;
1221 	return r;
1222 }
1223 
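/*
 * Indirect buffer test: build a small IB that uses WRITE_DATA to
 * store a magic value into a writeback (WB) slot, schedule it, wait
 * on the fence and verify the value landed in memory.
 */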
1224 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1225 {
1226 	struct amdgpu_device *adev = ring->adev;
1227 	struct amdgpu_ib ib;
1228 	struct dma_fence *f = NULL;
1229 
1230 	unsigned index;
1231 	uint64_t gpu_addr;
1232 	uint32_t tmp;
1233 	long r;
1234 
1235 	r = amdgpu_device_wb_get(adev, &index);
1236 	if (r)
1237 		return r;
1238 
1239 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1240 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1241 	memset(&ib, 0, sizeof(ib));
1242 
1243 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
1244 	if (r)
1245 		goto err1;
1246 
1247 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1248 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1249 	ib.ptr[2] = lower_32_bits(gpu_addr);
1250 	ib.ptr[3] = upper_32_bits(gpu_addr);
1251 	ib.ptr[4] = 0xDEADBEEF;
1252 	ib.length_dw = 5;
1253 
1254 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1255 	if (r)
1256 		goto err2;
1257 
1258 	r = dma_fence_wait_timeout(f, false, timeout);
1259 	if (r == 0) {
1260 		r = -ETIMEDOUT;
1261 		goto err2;
1262 	} else if (r < 0) {
1263 		goto err2;
1264 	}
1265 
1266 	tmp = adev->wb.wb[index];
1267 	if (tmp == 0xDEADBEEF)
1268 		r = 0;
1269 	else
1270 		r = -EINVAL;
1271 
1272 err2:
1273 	amdgpu_ib_free(&ib, NULL);
1274 	dma_fence_put(f);
1275 err1:
1276 	amdgpu_device_wb_free(adev, index);
1277 	return r;
1278 }
1279 
1281 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1282 {
1283 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
1284 	amdgpu_ucode_release(&adev->gfx.me_fw);
1285 	amdgpu_ucode_release(&adev->gfx.ce_fw);
1286 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
1287 	amdgpu_ucode_release(&adev->gfx.mec_fw);
1288 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
1289 
1290 	kfree(adev->gfx.rlc.register_list_format);
1291 }
1292 
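/*
 * Record whether the loaded ME and MEC firmware are new enough for
 * the combined register write+wait packet path (the fw_write_wait
 * flags); the version floors below are per GC IP version. A one-time
 * warning is raised for CP firmware older than the recommended
 * baseline.
 */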
1293 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1294 {
1295 	adev->gfx.me_fw_write_wait = false;
1296 	adev->gfx.mec_fw_write_wait = false;
1297 
1298 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1299 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
1300 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
1301 	     (adev->gfx.mec_feature_version < 46) ||
1302 	     (adev->gfx.pfp_fw_version < 0x000000b7) ||
1303 	     (adev->gfx.pfp_feature_version < 46)))
1304 		drm_warn_once(adev_to_drm(adev),
1305 			      "CP firmware version too old, please update!");
1306 
1307 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1308 	case IP_VERSION(9, 0, 1):
1309 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1310 		    (adev->gfx.me_feature_version >= 42) &&
1311 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1312 		    (adev->gfx.pfp_feature_version >= 42))
1313 			adev->gfx.me_fw_write_wait = true;
1314 
1315 		if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1316 		    (adev->gfx.mec_feature_version >= 42))
1317 			adev->gfx.mec_fw_write_wait = true;
1318 		break;
1319 	case IP_VERSION(9, 2, 1):
1320 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1321 		    (adev->gfx.me_feature_version >= 44) &&
1322 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1323 		    (adev->gfx.pfp_feature_version >= 44))
1324 			adev->gfx.me_fw_write_wait = true;
1325 
1326 		if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1327 		    (adev->gfx.mec_feature_version >= 44))
1328 			adev->gfx.mec_fw_write_wait = true;
1329 		break;
1330 	case IP_VERSION(9, 4, 0):
1331 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1332 		    (adev->gfx.me_feature_version >= 44) &&
1333 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1334 		    (adev->gfx.pfp_feature_version >= 44))
1335 			adev->gfx.me_fw_write_wait = true;
1336 
1337 		if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1338 		    (adev->gfx.mec_feature_version >= 44))
1339 			adev->gfx.mec_fw_write_wait = true;
1340 		break;
1341 	case IP_VERSION(9, 1, 0):
1342 	case IP_VERSION(9, 2, 2):
1343 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1344 		    (adev->gfx.me_feature_version >= 42) &&
1345 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1346 		    (adev->gfx.pfp_feature_version >= 42))
1347 			adev->gfx.me_fw_write_wait = true;
1348 
1349 		if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1350 		    (adev->gfx.mec_feature_version >= 42))
1351 			adev->gfx.mec_fw_write_wait = true;
1352 		break;
1353 	default:
1354 		adev->gfx.me_fw_write_wait = true;
1355 		adev->gfx.mec_fw_write_wait = true;
1356 		break;
1357 	}
1358 }
1359 
1360 struct amdgpu_gfxoff_quirk {
1361 	u16 chip_vendor;
1362 	u16 chip_device;
1363 	u16 subsys_vendor;
1364 	u16 subsys_device;
1365 	u8 revision;
1366 };
1367 
1368 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1369 	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1370 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1371 	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1372 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
	/* GFXOFF is unstable on C6 parts with VBIOS 113-RAVEN-114 */
1374 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1375 	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
1376 	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
1377 	/* https://bbs.openkylin.top/t/topic/171497 */
1378 	{ 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
1379 	/* HP 705G4 DM with R5 2400G */
1380 	{ 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
1381 	{ 0, 0, 0, 0, 0 },
1382 };
1383 
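/*
 * Match the device against the zero-terminated quirk list above on
 * PCI vendor/device, subsystem vendor/device and revision; any hit
 * disables GFXOFF for this board.
 */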
1384 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1385 {
1386 	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1387 
1388 	while (p && p->chip_device != 0) {
1389 		if (pdev->vendor == p->chip_vendor &&
1390 		    pdev->device == p->chip_device &&
1391 		    pdev->subsystem_vendor == p->subsys_vendor &&
1392 		    pdev->subsystem_device == p->subsys_device &&
1393 		    pdev->revision == p->revision) {
1394 			return true;
1395 		}
1396 		++p;
1397 	}
1398 	return false;
1399 }
1400 
1401 static bool is_raven_kicker(struct amdgpu_device *adev)
1402 {
	return adev->pm.fw_version >= 0x41e2b;
1407 }
1408 
1409 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1410 {
	return (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&
	       (adev->gfx.me_fw_version >= 0x000000a5) &&
	       (adev->gfx.me_feature_version >= 52);
1417 }
1418 
1419 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1420 {
1421 	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1422 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1423 
1424 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1425 	case IP_VERSION(9, 0, 1):
1426 	case IP_VERSION(9, 2, 1):
1427 	case IP_VERSION(9, 4, 0):
1428 		break;
1429 	case IP_VERSION(9, 2, 2):
1430 	case IP_VERSION(9, 1, 0):
1431 		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1432 		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1433 		    ((!is_raven_kicker(adev) &&
1434 		      adev->gfx.rlc_fw_version < 531) ||
1435 		     (adev->gfx.rlc_feature_version < 1) ||
1436 		     !adev->gfx.rlc.is_rlc_v2_1))
1437 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1438 
1439 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1440 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1441 				AMD_PG_SUPPORT_CP |
1442 				AMD_PG_SUPPORT_RLC_SMU_HS;
1443 		break;
1444 	case IP_VERSION(9, 3, 0):
1445 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1446 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1447 				AMD_PG_SUPPORT_CP |
1448 				AMD_PG_SUPPORT_RLC_SMU_HS;
1449 		break;
1450 	default:
1451 		break;
1452 	}
1453 }
1454 
1455 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1456 					  char *chip_name)
1457 {
1458 	int err;
1459 
1460 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
1461 				   AMDGPU_UCODE_REQUIRED,
1462 				   "amdgpu/%s_pfp.bin", chip_name);
1463 	if (err)
1464 		goto out;
1465 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
1466 
1467 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1468 				   AMDGPU_UCODE_REQUIRED,
1469 				   "amdgpu/%s_me.bin", chip_name);
1470 	if (err)
1471 		goto out;
1472 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
1473 
1474 	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1475 				   AMDGPU_UCODE_REQUIRED,
1476 				   "amdgpu/%s_ce.bin", chip_name);
1477 	if (err)
1478 		goto out;
1479 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
1480 
1481 out:
1482 	if (err) {
1483 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
1484 		amdgpu_ucode_release(&adev->gfx.me_fw);
1485 		amdgpu_ucode_release(&adev->gfx.ce_fw);
1486 	}
1487 	return err;
1488 }
1489 
1490 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1491 				       char *chip_name)
1492 {
1493 	int err;
1494 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1495 	uint16_t version_major;
1496 	uint16_t version_minor;
1497 	uint32_t smu_version;
1498 
	/*
	 * Picasso parts on AM4 socket boards use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 * Detection: PCO AM4 if the PCI revision is 0xC8-0xCF or
	 * 0xD8-0xDF; otherwise the part is PCO FP5.
	 */
1507 	if (!strcmp(chip_name, "picasso") &&
1508 		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1509 		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1510 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1511 					   AMDGPU_UCODE_REQUIRED,
1512 					   "amdgpu/%s_rlc_am4.bin", chip_name);
1513 	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1514 		(smu_version >= 0x41e2b))
		/*
		 * SMC is loaded by the SBIOS on APUs, so the SMU version
		 * can be queried directly.
		 */
1518 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1519 					   AMDGPU_UCODE_REQUIRED,
1520 					   "amdgpu/%s_kicker_rlc.bin", chip_name);
1521 	else
1522 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1523 					   AMDGPU_UCODE_REQUIRED,
1524 					   "amdgpu/%s_rlc.bin", chip_name);
1525 	if (err)
1526 		goto out;
1527 
1528 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1529 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1530 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1531 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
1532 out:
1533 	if (err)
1534 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
1535 
1536 	return err;
1537 }
1538 
1539 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1540 {
1541 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
1542 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1543 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))
1544 		return false;
1545 
1546 	return true;
1547 }
1548 
1549 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1550 					      char *chip_name)
1551 {
1552 	int err;
1553 
1554 	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1555 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1556 				   AMDGPU_UCODE_REQUIRED,
1557 				   "amdgpu/%s_sjt_mec.bin", chip_name);
1558 	else
1559 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1560 					   AMDGPU_UCODE_REQUIRED,
1561 					   "amdgpu/%s_mec.bin", chip_name);
1562 	if (err)
1563 		goto out;
1564 
1565 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
1566 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
1567 
1568 	if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1569 		if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1570 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1571 						   AMDGPU_UCODE_REQUIRED,
1572 						   "amdgpu/%s_sjt_mec2.bin", chip_name);
1573 		else
1574 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1575 						   AMDGPU_UCODE_REQUIRED,
1576 						   "amdgpu/%s_mec2.bin", chip_name);
1577 		if (!err) {
1578 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
1579 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
1580 		} else {
1581 			err = 0;
1582 			amdgpu_ucode_release(&adev->gfx.mec2_fw);
1583 		}
1584 	} else {
1585 		adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1586 		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1587 	}
1588 
1589 	gfx_v9_0_check_if_need_gfxoff(adev);
1590 	gfx_v9_0_check_fw_write_wait(adev);
1591 
1592 out:
1593 	if (err)
1594 		amdgpu_ucode_release(&adev->gfx.mec_fw);
1595 	return err;
1596 }
1597 
1598 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1599 {
1600 	char ucode_prefix[30];
1601 	int r;
1602 
1603 	DRM_DEBUG("\n");
1604 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
1605 
1606 	/* No CPG in Arcturus */
1607 	if (adev->gfx.num_gfx_rings) {
1608 		r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
1609 		if (r)
1610 			return r;
1611 	}
1612 
1613 	r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
1614 	if (r)
1615 		return r;
1616 
	return gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
1622 }
1623 
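/*
 * Size of the clear-state buffer in dwords: begin-clear-state (2) +
 * context control (3) + one packet header pair plus payload per
 * SECT_CONTEXT extent (2 + reg_count) + end-clear-state (2) +
 * clear-state (2).
 */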
1624 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1625 {
1626 	u32 count = 0;
1627 	const struct cs_section_def *sect = NULL;
1628 	const struct cs_extent_def *ext = NULL;
1629 
1630 	/* begin clear state */
1631 	count += 2;
1632 	/* context control state */
1633 	count += 3;
1634 
1635 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1636 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1637 			if (sect->id == SECT_CONTEXT)
1638 				count += 2 + ext->reg_count;
1639 			else
1640 				return 0;
1641 		}
1642 	}
1643 
1644 	/* end clear state */
1645 	count += 2;
1646 	/* clear state */
1647 	count += 2;
1648 
1649 	return count;
1650 }
1651 
1652 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
1653 {
1654 	u32 count = 0;
1655 
1656 	if (adev->gfx.rlc.cs_data == NULL)
1657 		return;
1658 	if (buffer == NULL)
1659 		return;
1660 
1661 	count = amdgpu_gfx_csb_preamble_start(buffer);
1662 	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
1663 	amdgpu_gfx_csb_preamble_end(buffer, count);
1664 }
1665 
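/*
 * Program the per-SE/SH always-on CU masks used by RLC power gating:
 * walk each shader array's CU bitmap, keep the first always_on_cu_num
 * CUs awake (4 on APUs, 8 on Vega12, 12 otherwise) and additionally
 * latch the first two CUs into RLC_PG_ALWAYS_ON_CU_MASK.
 */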
1666 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1667 {
1668 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1669 	uint32_t pg_always_on_cu_num = 2;
1670 	uint32_t always_on_cu_num;
1671 	uint32_t i, j, k;
1672 	uint32_t mask, cu_bitmap, counter;
1673 
1674 	if (adev->flags & AMD_IS_APU)
1675 		always_on_cu_num = 4;
1676 	else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))
1677 		always_on_cu_num = 8;
1678 	else
1679 		always_on_cu_num = 12;
1680 
1681 	mutex_lock(&adev->grbm_idx_mutex);
1682 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1683 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1684 			mask = 1;
1685 			cu_bitmap = 0;
1686 			counter = 0;
1687 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
1688 
			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1690 				if (cu_info->bitmap[0][i][j] & mask) {
1691 					if (counter == pg_always_on_cu_num)
1692 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1693 					if (counter < always_on_cu_num)
1694 						cu_bitmap |= mask;
1695 					else
1696 						break;
1697 					counter++;
1698 				}
1699 				mask <<= 1;
1700 			}
1701 
1702 			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1703 			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1704 		}
1705 	}
1706 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1707 	mutex_unlock(&adev->grbm_idx_mutex);
1708 }
1709 
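/*
 * Configure load balancing per watt (LBPW): program the RLC
 * load-balancer thresholds, sample counters and init CU mask, then
 * program the always-on CU masks. gfx_v9_4_init_lbpw() below is the
 * same sequence with different threshold values.
 */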
1710 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1711 {
1712 	uint32_t data;
1713 
1714 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1715 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1716 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1717 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1718 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1719 
1720 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1721 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1722 
1723 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1724 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1725 
1726 	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1728 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1729 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1730 
1731 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1732 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1733 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1734 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1735 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1736 
1737 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1738 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1739 	data &= 0x0000FFFF;
1740 	data |= 0x00C00000;
1741 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1742 
1743 	/*
1744 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1745 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1746 	 */
1747 
	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but is used here as part of the RLC_LB_CNTL configuration */
1750 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1751 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1752 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1753 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1754 	mutex_unlock(&adev->grbm_idx_mutex);
1755 
1756 	gfx_v9_0_init_always_on_cu_mask(adev);
1757 }
1758 
1759 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1760 {
1761 	uint32_t data;
1762 
1763 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1764 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1765 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1766 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1767 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1768 
1769 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1770 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1771 
1772 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1773 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1774 
1775 	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1777 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1778 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1779 
1780 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1781 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1782 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1783 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1784 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1785 
1786 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1787 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1788 	data &= 0x0000FFFF;
1789 	data |= 0x00C00000;
1790 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1791 
1792 	/*
1793 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1794 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1795 	 */
1796 
	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but is used here as part of the RLC_LB_CNTL configuration */
1799 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1800 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1801 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1802 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1803 	mutex_unlock(&adev->grbm_idx_mutex);
1804 
1805 	gfx_v9_0_init_always_on_cu_mask(adev);
1806 }
1807 
1808 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1809 {
1810 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1811 }
1812 
1813 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1814 {
1815 	if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1816 		return 5;
1817 	else
1818 		return 4;
1819 }
1820 
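/*
 * Record the scratch, GRBM control/index and spare-interrupt register
 * offsets used by the generic RLCG indirect register access helpers,
 * and mark RLCG register access as supported.
 */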
1821 static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1822 {
1823 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1824 
1825 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
1826 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1827 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
1828 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
1829 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
1830 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
1831 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
1832 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
1833 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1834 }
1835 
1836 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1837 {
1838 	const struct cs_section_def *cs_data;
1839 	int r;
1840 
1841 	adev->gfx.rlc.cs_data = gfx9_cs_data;
1842 
1843 	cs_data = adev->gfx.rlc.cs_data;
1844 
1845 	if (cs_data) {
1846 		/* init clear state block */
1847 		r = amdgpu_gfx_rlc_init_csb(adev);
1848 		if (r)
1849 			return r;
1850 	}
1851 
1852 	if (adev->flags & AMD_IS_APU) {
1853 		/* TODO: double check the cp_table_size for RV */
1854 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1855 		r = amdgpu_gfx_rlc_init_cpt(adev);
1856 		if (r)
1857 			return r;
1858 	}
1859 
1860 	return 0;
1861 }
1862 
1863 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1864 {
1865 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1866 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1867 }
1868 
1869 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1870 {
1871 	int r;
1872 	u32 *hpd;
1873 	const __le32 *fw_data;
1874 	unsigned fw_size;
1875 	u32 *fw;
1876 	size_t mec_hpd_size;
1877 
1878 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1879 
1880 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1881 
1882 	/* take ownership of the relevant compute queues */
1883 	amdgpu_gfx_compute_queue_acquire(adev);
1884 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1885 	if (mec_hpd_size) {
1886 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1887 					      AMDGPU_GEM_DOMAIN_VRAM |
1888 					      AMDGPU_GEM_DOMAIN_GTT,
1889 					      &adev->gfx.mec.hpd_eop_obj,
1890 					      &adev->gfx.mec.hpd_eop_gpu_addr,
1891 					      (void **)&hpd);
1892 		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1894 			gfx_v9_0_mec_fini(adev);
1895 			return r;
1896 		}
1897 
1898 		memset(hpd, 0, mec_hpd_size);
1899 
1900 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1901 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1902 	}
1903 
1904 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1905 
1906 	fw_data = (const __le32 *)
1907 		(adev->gfx.mec_fw->data +
1908 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1909 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1910 
1911 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1912 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1913 				      &adev->gfx.mec.mec_fw_obj,
1914 				      &adev->gfx.mec.mec_fw_gpu_addr,
1915 				      (void **)&fw);
1916 	if (r) {
1917 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1918 		gfx_v9_0_mec_fini(adev);
1919 		return r;
1920 	}
1921 
1922 	memcpy(fw, fw_data, fw_size);
1923 
1924 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1925 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1926 
1927 	return 0;
1928 }
1929 
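/*
 * Read one wavefront state register through the SQ indirect register
 * interface: program SQ_IND_INDEX with the wave/SIMD/register
 * selector (FORCE_READ set) and fetch the value from SQ_IND_DATA.
 */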
1930 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1931 {
1932 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1933 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1934 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1935 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
1936 		(SQ_IND_INDEX__FORCE_READ_MASK));
1937 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1938 }
1939 
1940 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1941 			   uint32_t wave, uint32_t thread,
1942 			   uint32_t regno, uint32_t num, uint32_t *out)
1943 {
1944 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1945 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1946 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1947 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
1948 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1949 		(SQ_IND_INDEX__FORCE_READ_MASK) |
1950 		(SQ_IND_INDEX__AUTO_INCR_MASK));
1951 	while (num--)
1952 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1953 }
1954 
1955 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1956 {
1957 	/* type 1 wave data */
1958 	dst[(*no_fields)++] = 1;
1959 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1960 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1961 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1962 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1963 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1964 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1965 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1966 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1967 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1968 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1969 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1970 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1971 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1972 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1973 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
1974 }
1975 
1976 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1977 				     uint32_t wave, uint32_t start,
1978 				     uint32_t size, uint32_t *dst)
1979 {
1980 	wave_read_regs(
1981 		adev, simd, wave, 0,
1982 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1983 }
1984 
1985 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1986 				     uint32_t wave, uint32_t thread,
1987 				     uint32_t start, uint32_t size,
1988 				     uint32_t *dst)
1989 {
1990 	wave_read_regs(
1991 		adev, simd, wave, thread,
1992 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1993 }
1994 
1995 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1996 				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
1997 {
1998 	soc15_grbm_select(adev, me, pipe, q, vm, 0);
1999 }
2000 
2001 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2008 	.get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
2009 };
2010 
const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
	.ras_error_inject = &gfx_v9_0_ras_error_inject,
	.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
2016 
2017 static struct amdgpu_gfx_ras gfx_v9_0_ras = {
2018 	.ras_block = {
2019 		.hw_ops = &gfx_v9_0_ras_ops,
2020 	},
2021 };
2022 
2023 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2024 {
2025 	u32 gb_addr_config;
2026 	int err;
2027 
2028 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2029 	case IP_VERSION(9, 0, 1):
2030 		adev->gfx.config.max_hw_contexts = 8;
2031 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2032 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2033 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2034 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2035 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2036 		break;
2037 	case IP_VERSION(9, 2, 1):
2038 		adev->gfx.config.max_hw_contexts = 8;
2039 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2040 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2041 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2042 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2043 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2044 		drm_info(adev_to_drm(adev), "fix gfx.config for vega12\n");
2045 		break;
2046 	case IP_VERSION(9, 4, 0):
2047 		adev->gfx.ras = &gfx_v9_0_ras;
2048 		adev->gfx.config.max_hw_contexts = 8;
2049 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2050 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2051 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2052 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2053 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2054 		gb_addr_config &= ~0xf3e777ff;
2055 		gb_addr_config |= 0x22014042;
2056 		/* check vbios table if gpu info is not available */
2057 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2058 		if (err)
2059 			return err;
2060 		break;
2061 	case IP_VERSION(9, 2, 2):
2062 	case IP_VERSION(9, 1, 0):
2063 		adev->gfx.config.max_hw_contexts = 8;
2064 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2065 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2066 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2067 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2068 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2069 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2070 		else
2071 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2072 		break;
2073 	case IP_VERSION(9, 4, 1):
2074 		adev->gfx.ras = &gfx_v9_4_ras;
2075 		adev->gfx.config.max_hw_contexts = 8;
2076 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2077 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2078 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2079 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2080 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2081 		gb_addr_config &= ~0xf3e777ff;
2082 		gb_addr_config |= 0x22014042;
2083 		break;
2084 	case IP_VERSION(9, 3, 0):
2085 		adev->gfx.config.max_hw_contexts = 8;
2086 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2087 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2088 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2089 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2090 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2091 		gb_addr_config &= ~0xf3e777ff;
2092 		gb_addr_config |= 0x22010042;
2093 		break;
2094 	case IP_VERSION(9, 4, 2):
2095 		adev->gfx.ras = &gfx_v9_4_2_ras;
2096 		adev->gfx.config.max_hw_contexts = 8;
2097 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2098 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2099 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2100 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2101 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2102 		gb_addr_config &= ~0xf3e777ff;
2103 		gb_addr_config |= 0x22014042;
2104 		/* check vbios table if gpu info is not available */
2105 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2106 		if (err)
2107 			return err;
2108 		break;
2109 	default:
2110 		BUG();
2111 		break;
2112 	}
2113 
2114 	adev->gfx.config.gb_addr_config = gb_addr_config;
2115 
2116 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2117 			REG_GET_FIELD(
2118 					adev->gfx.config.gb_addr_config,
2119 					GB_ADDR_CONFIG,
2120 					NUM_PIPES);
2121 
2122 	adev->gfx.config.max_tile_pipes =
2123 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2124 
2125 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2126 			REG_GET_FIELD(
2127 					adev->gfx.config.gb_addr_config,
2128 					GB_ADDR_CONFIG,
2129 					NUM_BANKS);
2130 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2131 			REG_GET_FIELD(
2132 					adev->gfx.config.gb_addr_config,
2133 					GB_ADDR_CONFIG,
2134 					MAX_COMPRESSED_FRAGS);
2135 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2136 			REG_GET_FIELD(
2137 					adev->gfx.config.gb_addr_config,
2138 					GB_ADDR_CONFIG,
2139 					NUM_RB_PER_SE);
2140 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2141 			REG_GET_FIELD(
2142 					adev->gfx.config.gb_addr_config,
2143 					GB_ADDR_CONFIG,
2144 					NUM_SHADER_ENGINES);
2145 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2146 			REG_GET_FIELD(
2147 					adev->gfx.config.gb_addr_config,
2148 					GB_ADDR_CONFIG,
2149 					PIPE_INTERLEAVE_SIZE));
2150 
2151 	return 0;
2152 }
2153 
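/*
 * Set up one compute ring: the <me, pipe, queue> triple comes from
 * the caller (mec0 is exposed as me1), the EOP buffer is a
 * GFX9_MEC_HPD_SIZE slice of the shared MEC HPD allocation, a
 * doorbell is assigned from the mec_ring0 range, and the EOP
 * interrupt source is selected per MEC pipe.
 */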
2154 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2155 				      int mec, int pipe, int queue)
2156 {
2157 	unsigned irq_type;
2158 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2159 	unsigned int hw_prio;
2160 
2163 	/* mec0 is me1 */
2164 	ring->me = mec + 1;
2165 	ring->pipe = pipe;
2166 	ring->queue = queue;
2167 
2168 	ring->ring_obj = NULL;
2169 	ring->use_doorbell = true;
2170 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2171 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2172 				+ (ring_id * GFX9_MEC_HPD_SIZE);
2173 	ring->vm_hub = AMDGPU_GFXHUB(0);
2174 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2175 
2176 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2177 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2178 		+ ring->pipe;
2179 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2180 			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
2181 	/* type-2 packets are deprecated on MEC, use type-3 instead */
2182 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2183 				hw_prio, NULL);
2184 }
2185 
2186 static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
2187 {
2188 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
2189 	uint32_t *ptr;
2190 	uint32_t inst;
2191 
2192 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
2193 	if (!ptr) {
2194 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
2195 		adev->gfx.ip_dump_core = NULL;
2196 	} else {
2197 		adev->gfx.ip_dump_core = ptr;
2198 	}
2199 
2200 	/* Allocate memory for compute queue registers for all the instances */
2201 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
2202 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
2203 		adev->gfx.mec.num_queue_per_pipe;
2204 
2205 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
2206 	if (!ptr) {
2207 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
2208 		adev->gfx.ip_dump_compute_queues = NULL;
2209 	} else {
2210 		adev->gfx.ip_dump_compute_queues = ptr;
2211 	}
2212 }
2213 
2214 static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
2215 {
2216 	int i, j, k, r, ring_id;
2217 	int xcc_id = 0;
2218 	struct amdgpu_ring *ring;
2219 	struct amdgpu_device *adev = ip_block->adev;
2220 	unsigned int hw_prio;
2221 
2222 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2223 	case IP_VERSION(9, 0, 1):
2224 	case IP_VERSION(9, 2, 1):
2225 	case IP_VERSION(9, 4, 0):
2226 	case IP_VERSION(9, 2, 2):
2227 	case IP_VERSION(9, 1, 0):
2228 	case IP_VERSION(9, 4, 1):
2229 	case IP_VERSION(9, 3, 0):
2230 	case IP_VERSION(9, 4, 2):
2231 		adev->gfx.mec.num_mec = 2;
2232 		break;
2233 	default:
2234 		adev->gfx.mec.num_mec = 1;
2235 		break;
2236 	}
2237 
2238 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2239 	case IP_VERSION(9, 0, 1):
2240 	case IP_VERSION(9, 2, 1):
2241 	case IP_VERSION(9, 4, 0):
2242 	case IP_VERSION(9, 2, 2):
2243 	case IP_VERSION(9, 1, 0):
2244 	case IP_VERSION(9, 3, 0):
2245 		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2246 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
		if (adev->gfx.me_fw_version >= 167 &&
2248 		    adev->gfx.pfp_fw_version >= 196 &&
2249 		    adev->gfx.mec_fw_version >= 474) {
2250 			adev->gfx.enable_cleaner_shader = true;
2251 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2252 			if (r) {
2253 				adev->gfx.enable_cleaner_shader = false;
2254 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2255 			}
2256 		}
2257 		break;
2258 	case IP_VERSION(9, 4, 2):
2259 		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2260 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2261 		if (adev->gfx.mec_fw_version >= 88) {
2262 			adev->gfx.enable_cleaner_shader = true;
2263 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2264 			if (r) {
2265 				adev->gfx.enable_cleaner_shader = false;
2266 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2267 			}
2268 		}
2269 		break;
2270 	default:
2271 		adev->gfx.enable_cleaner_shader = false;
2272 		break;
2273 	}
2274 
2275 	adev->gfx.mec.num_pipe_per_mec = 4;
2276 	adev->gfx.mec.num_queue_per_pipe = 8;
2277 
2278 	/* EOP Event */
2279 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2280 	if (r)
2281 		return r;
2282 
2283 	/* Bad opcode Event */
2284 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
2285 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
2286 			      &adev->gfx.bad_op_irq);
2287 	if (r)
2288 		return r;
2289 
2290 	/* Privileged reg */
2291 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2292 			      &adev->gfx.priv_reg_irq);
2293 	if (r)
2294 		return r;
2295 
2296 	/* Privileged inst */
2297 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2298 			      &adev->gfx.priv_inst_irq);
2299 	if (r)
2300 		return r;
2301 
2302 	/* ECC error */
2303 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2304 			      &adev->gfx.cp_ecc_error_irq);
2305 	if (r)
2306 		return r;
2307 
2308 	/* FUE error */
2309 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2310 			      &adev->gfx.cp_ecc_error_irq);
2311 	if (r)
2312 		return r;
2313 
2314 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2315 
2316 	if (adev->gfx.rlc.funcs) {
2317 		if (adev->gfx.rlc.funcs->init) {
2318 			r = adev->gfx.rlc.funcs->init(adev);
2319 			if (r) {
2320 				dev_err(adev->dev, "Failed to init rlc BOs!\n");
2321 				return r;
2322 			}
2323 		}
2324 	}
2325 
2326 	r = gfx_v9_0_mec_init(adev);
2327 	if (r) {
2328 		DRM_ERROR("Failed to init MEC BOs!\n");
2329 		return r;
2330 	}
2331 
2332 	/* set up the gfx ring */
2333 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2334 		ring = &adev->gfx.gfx_ring[i];
2335 		ring->ring_obj = NULL;
2336 		if (!i)
2337 			sprintf(ring->name, "gfx");
2338 		else
2339 			sprintf(ring->name, "gfx_%d", i);
2340 		ring->use_doorbell = true;
2341 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2342 
2343 		/* disable scheduler on the real ring */
2344 		ring->no_scheduler = adev->gfx.mcbp;
2345 		ring->vm_hub = AMDGPU_GFXHUB(0);
2346 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2347 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2348 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
2349 		if (r)
2350 			return r;
2351 	}
2352 
2353 	/* set up the software rings */
2354 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2355 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2356 			ring = &adev->gfx.sw_gfx_ring[i];
2357 			ring->ring_obj = NULL;
			sprintf(ring->name, "%s", amdgpu_sw_ring_name(i));
2359 			ring->use_doorbell = true;
2360 			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2361 			ring->is_sw_ring = true;
2362 			hw_prio = amdgpu_sw_ring_priority(i);
2363 			ring->vm_hub = AMDGPU_GFXHUB(0);
2364 			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2365 					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
2366 					     NULL);
2367 			if (r)
2368 				return r;
2369 			ring->wptr = 0;
2370 		}
2371 
2372 		/* init the muxer and add software rings */
2373 		r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
2374 					 GFX9_NUM_SW_GFX_RINGS);
2375 		if (r) {
2376 			DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
2377 			return r;
2378 		}
2379 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2380 			r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
2381 							&adev->gfx.sw_gfx_ring[i]);
2382 			if (r) {
2383 				DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
2384 				return r;
2385 			}
2386 		}
2387 	}
2388 
2389 	/* set up the compute queues - allocate horizontally across pipes */
2390 	ring_id = 0;
2391 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2392 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2393 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2394 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2395 								     k, j))
2396 					continue;
2397 
2398 				r = gfx_v9_0_compute_ring_init(adev,
2399 							       ring_id,
2400 							       i, k, j);
2401 				if (r)
2402 					return r;
2403 
2404 				ring_id++;
2405 			}
2406 		}
2407 	}
2408 
2409 	/* TODO: Add queue reset mask when FW fully supports it */
2410 	adev->gfx.gfx_supported_reset =
2411 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
2412 	adev->gfx.compute_supported_reset =
2413 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
2414 	if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
2415 		adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2416 
2417 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
2418 	if (r) {
2419 		DRM_ERROR("Failed to init KIQ BOs!\n");
2420 		return r;
2421 	}
2422 
2423 	r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2424 	if (r)
2425 		return r;
2426 
	/* create MQD for all compute queues as well as KIQ for SRIOV case */
2428 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
2429 	if (r)
2430 		return r;
2431 
2432 	adev->gfx.ce_ram_size = 0x8000;
2433 
2434 	r = gfx_v9_0_gpu_early_init(adev);
2435 	if (r)
2436 		return r;
2437 
2438 	if (amdgpu_gfx_ras_sw_init(adev)) {
2439 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
2440 		return -EINVAL;
2441 	}
2442 
2443 	gfx_v9_0_alloc_ip_dump(adev);
2444 
2445 	r = amdgpu_gfx_sysfs_init(adev);
2446 	if (r)
2447 		return r;
2448 
2449 	return 0;
2450 }
2451 
2453 static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2454 {
2455 	int i;
2456 	struct amdgpu_device *adev = ip_block->adev;
2457 
2458 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2459 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
2460 			amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
2461 		amdgpu_ring_mux_fini(&adev->gfx.muxer);
2462 	}
2463 
2464 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2465 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2466 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2467 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2468 
2469 	amdgpu_gfx_mqd_sw_fini(adev, 0);
2470 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2471 	amdgpu_gfx_kiq_fini(adev, 0);
2472 
2473 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
2474 
2475 	gfx_v9_0_mec_fini(adev);
2476 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2477 				&adev->gfx.rlc.clear_state_gpu_addr,
2478 				(void **)&adev->gfx.rlc.cs_ptr);
2479 	if (adev->flags & AMD_IS_APU) {
2480 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2481 				&adev->gfx.rlc.cp_table_gpu_addr,
2482 				(void **)&adev->gfx.rlc.cp_table_ptr);
2483 	}
2484 	gfx_v9_0_free_microcode(adev);
2485 
2486 	amdgpu_gfx_sysfs_fini(adev);
2487 
2488 	kfree(adev->gfx.ip_dump_core);
2489 	kfree(adev->gfx.ip_dump_compute_queues);
2490 
2491 	return 0;
2492 }
2493 
2495 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2496 {
2497 	/* TODO */
2498 }
2499 
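/*
 * Select which SE/SH/instance subsequent indexed register accesses
 * target via GRBM_GFX_INDEX; 0xffffffff for any argument enables
 * broadcast writes to all units of that type.
 */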
2500 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2501 			   u32 instance, int xcc_id)
2502 {
2503 	u32 data;
2504 
2505 	if (instance == 0xffffffff)
2506 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2507 	else
2508 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2509 
2510 	if (se_num == 0xffffffff)
2511 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2512 	else
2513 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2514 
2515 	if (sh_num == 0xffffffff)
2516 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2517 	else
2518 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2519 
2520 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2521 }
2522 
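/*
 * Return the active render-backend bitmap for the currently selected
 * SE/SH: OR the fused (CC) and user (GC_USER) backend-disable masks,
 * then invert within the per-SH backend width.
 */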
2523 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2524 {
2525 	u32 data, mask;
2526 
2527 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2528 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2529 
2530 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2531 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2532 
2533 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2534 					 adev->gfx.config.max_sh_per_se);
2535 
2536 	return (~data) & mask;
2537 }
2538 
2539 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2540 {
2541 	int i, j;
2542 	u32 data;
2543 	u32 active_rbs = 0;
2544 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2545 					adev->gfx.config.max_sh_per_se;
2546 
2547 	mutex_lock(&adev->grbm_idx_mutex);
2548 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2549 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2550 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2551 			data = gfx_v9_0_get_rb_active_bitmap(adev);
2552 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2553 					       rb_bitmap_width_per_sh);
2554 		}
2555 	}
2556 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2557 	mutex_unlock(&adev->grbm_idx_mutex);
2558 
2559 	adev->gfx.config.backend_enable_mask = active_rbs;
2560 	adev->gfx.config.num_rbs = hweight32(active_rbs);
2561 }
2562 
2563 static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
2564 				uint32_t first_vmid,
2565 				uint32_t last_vmid)
2566 {
2567 	uint32_t data;
2568 	uint32_t trap_config_vmid_mask = 0;
2569 	int i;
2570 
2571 	/* Calculate trap config vmid mask */
2572 	for (i = first_vmid; i < last_vmid; i++)
2573 		trap_config_vmid_mask |= (1 << i);
2574 
2575 	data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
2576 			VMID_SEL, trap_config_vmid_mask);
2577 	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
2578 			TRAP_EN, 1);
2579 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
2580 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
2581 
2582 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
2583 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
2584 }
2585 
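/*
 * SH_MEM_BASES holds bits 63:48 of the private and shared aperture
 * bases, so 0x6000 in each 16-bit field places both apertures at
 * 0x6000'0000'00000000, matching the layout described below.
 */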
2586 #define DEFAULT_SH_MEM_BASES	(0x6000)
2587 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2588 {
2589 	int i;
2590 	uint32_t sh_mem_config;
2591 	uint32_t sh_mem_bases;
2592 
2593 	/*
2594 	 * Configure apertures:
2595 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2596 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2597 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2598 	 */
2599 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2600 
2601 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2602 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2603 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2604 
2605 	mutex_lock(&adev->srbm_mutex);
2606 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2607 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2608 		/* CP and shaders */
2609 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2610 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2611 	}
2612 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2613 	mutex_unlock(&adev->srbm_mutex);
2614 
	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
2617 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2618 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2619 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2620 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2621 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2622 	}
2623 }
2624 
2625 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2626 {
2627 	int vmid;
2628 
2629 	/*
2630 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2631 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2632 	 * the driver can enable them for graphics. VMID0 should maintain
2633 	 * access so that HWS firmware can save/restore entries.
2634 	 */
2635 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2636 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2637 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2638 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2639 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2640 	}
2641 }
2642 
2643 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2644 {
2645 	uint32_t tmp;
2646 
2647 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2648 	case IP_VERSION(9, 4, 1):
2649 		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2650 		tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
2651 				!READ_ONCE(adev->barrier_has_auto_waitcnt));
2652 		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2653 		break;
2654 	case IP_VERSION(9, 4, 2):
2655 		gfx_v9_4_2_init_sq(adev);
2656 		break;
2657 	default:
2658 		break;
2659 	}
2660 }
2661 
2662 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2663 {
2664 	u32 tmp;
2665 	int i;
2666 
2667 	if (!amdgpu_sriov_vf(adev) ||
2668 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
2669 		WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2670 	}
2671 
2672 	gfx_v9_0_tiling_mode_table_init(adev);
2673 
2674 	if (adev->gfx.num_gfx_rings)
2675 		gfx_v9_0_setup_rb(adev);
2676 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2677 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2678 
2679 	/* XXX SH_MEM regs */
2680 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2681 	mutex_lock(&adev->srbm_mutex);
2682 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2683 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2684 		/* CP and shaders */
2685 		if (i == 0) {
2686 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2687 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2688 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2689 					    !!adev->gmc.noretry);
2690 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2691 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2692 		} else {
2693 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2694 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2695 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2696 					    !!adev->gmc.noretry);
2697 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2698 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2699 				(adev->gmc.private_aperture_start >> 48));
2700 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2701 				(adev->gmc.shared_aperture_start >> 48));
2702 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2703 		}
2704 	}
2705 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2706 
2707 	mutex_unlock(&adev->srbm_mutex);
2708 
2709 	gfx_v9_0_init_compute_vmid(adev);
2710 	gfx_v9_0_init_gds_vmid(adev);
2711 	gfx_v9_0_init_sq_config(adev);
2712 }
2713 
2714 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2715 {
2716 	u32 i, j, k;
2717 	u32 mask;
2718 
2719 	mutex_lock(&adev->grbm_idx_mutex);
2720 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2721 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2722 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2723 			for (k = 0; k < adev->usec_timeout; k++) {
2724 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2725 					break;
2726 				udelay(1);
2727 			}
2728 			if (k == adev->usec_timeout) {
2729 				amdgpu_gfx_select_se_sh(adev, 0xffffffff,
2730 						      0xffffffff, 0xffffffff, 0);
2731 				mutex_unlock(&adev->grbm_idx_mutex);
				drm_info(adev_to_drm(adev), "Timeout waiting for RLC serdes %u,%u\n",
					 i, j);
2734 				return;
2735 			}
2736 		}
2737 	}
2738 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2739 	mutex_unlock(&adev->grbm_idx_mutex);
2740 
2741 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2742 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2743 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2744 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2745 	for (k = 0; k < adev->usec_timeout; k++) {
2746 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2747 			break;
2748 		udelay(1);
2749 	}
2750 }
2751 
2752 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2753 					       bool enable)
2754 {
2755 	u32 tmp;
2756 
2757 	/* These interrupts should be enabled to drive DS clock */
2758 
2759 	tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2760 
2761 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2762 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2763 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2764 	if (adev->gfx.num_gfx_rings)
2765 		tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2766 
2767 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2768 }
2769 
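/* Point the RLC at the clear-state buffer: generate its contents via the
 * rlc get_csb_buffer callback, then program the CSIB address/length
 * registers so the RLC can replay the clear state.
 */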
2770 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2771 {
2772 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2773 	/* csib */
2774 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2775 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2776 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2777 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2778 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2779 			adev->gfx.rlc.clear_state_size);
2780 }
2781 
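/* Walk the RLC register-list-format blob starting at indirect_offset.
 * Each indirect block is a sequence of entries terminated by 0xFFFFFFFF;
 * the start offset of every block is recorded in indirect_start_offsets[]
 * and the distinct indirect register offsets are collected in
 * unique_indirect_regs[].
 */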
2782 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2783 				int indirect_offset,
2784 				int list_size,
2785 				int *unique_indirect_regs,
2786 				int unique_indirect_reg_count,
2787 				int *indirect_start_offsets,
2788 				int *indirect_start_offsets_count,
2789 				int max_start_offsets_count)
2790 {
2791 	int idx;
2792 
2793 	for (; indirect_offset < list_size; indirect_offset++) {
2794 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2795 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2796 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2797 
2798 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2799 			indirect_offset += 2;
2800 
2801 			/* look for the matching index */
2802 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2803 				if (unique_indirect_regs[idx] ==
2804 					register_list_format[indirect_offset] ||
2805 					!unique_indirect_regs[idx])
2806 					break;
2807 			}
2808 
2809 			BUG_ON(idx >= unique_indirect_reg_count);
2810 
2811 			if (!unique_indirect_regs[idx])
2812 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2813 
2814 			indirect_offset++;
2815 		}
2816 	}
2817 }
2818 
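/* Program the RLC save/restore machine: copy the register_restore table
 * into SRM ARAM, load the direct and indirect portions of the register
 * list format into GPM scratch, write the list size and the block start
 * offsets, and finally map each unique indirect register through the
 * RLC_SRM_INDEX_CNTL_ADDR/DATA register pairs.
 */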
2819 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2820 {
2821 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2822 	int unique_indirect_reg_count = 0;
2823 
2824 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2825 	int indirect_start_offsets_count = 0;
2826 
2827 	int list_size = 0;
2828 	int i = 0, j = 0;
2829 	u32 tmp = 0;
2830 
2831 	u32 *register_list_format =
2832 		kmemdup(adev->gfx.rlc.register_list_format,
2833 			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2834 	if (!register_list_format)
2835 		return -ENOMEM;
2836 
2837 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2838 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2839 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2840 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2841 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2842 				    unique_indirect_regs,
2843 				    unique_indirect_reg_count,
2844 				    indirect_start_offsets,
2845 				    &indirect_start_offsets_count,
2846 				    ARRAY_SIZE(indirect_start_offsets));
2847 
2848 	/* enable auto inc in case it is disabled */
2849 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2850 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2851 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2852 
2853 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2854 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2855 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2856 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2857 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2858 			adev->gfx.rlc.register_restore[i]);
2859 
2860 	/* load indirect register */
2861 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2862 		adev->gfx.rlc.reg_list_format_start);
2863 
2864 	/* direct register portion */
2865 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2866 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2867 			register_list_format[i]);
2868 
2869 	/* indirect register portion */
2870 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2871 		if (register_list_format[i] == 0xFFFFFFFF) {
2872 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2873 			continue;
2874 		}
2875 
2876 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2877 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2878 
2879 		for (j = 0; j < unique_indirect_reg_count; j++) {
2880 			if (register_list_format[i] == unique_indirect_regs[j]) {
2881 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2882 				break;
2883 			}
2884 		}
2885 
2886 		BUG_ON(j >= unique_indirect_reg_count);
2887 
2888 		i++;
2889 	}
2890 
2891 	/* set save/restore list size */
2892 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2893 	list_size = list_size >> 1;
2894 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2895 		adev->gfx.rlc.reg_restore_list_size);
2896 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2897 
2898 	/* write the starting offsets to RLC scratch ram */
2899 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2900 		adev->gfx.rlc.starting_offsets_start);
2901 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2902 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2903 		       indirect_start_offsets[i]);
2904 
2905 	/* load unique indirect regs */
2906 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2907 		if (unique_indirect_regs[i] != 0) {
2908 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2909 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2910 			       unique_indirect_regs[i] & 0x3FFFF);
2911 
2912 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2913 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2914 			       unique_indirect_regs[i] >> 20);
2915 		}
2916 	}
2917 
2918 	kfree(register_list_format);
2919 	return 0;
2920 }
2921 
2922 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2923 {
2924 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2925 }
2926 
2927 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2928 					     bool enable)
2929 {
2930 	uint32_t data = 0;
2931 	uint32_t default_data = 0;
2932 
2933 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2934 	if (enable) {
2935 		/* enable GFXIP control over CGPG */
2936 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2937 		if (default_data != data)
2938 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2939 
2940 		/* update status */
2941 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2942 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2943 		if (default_data != data)
2944 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2945 	} else {
2946 		/* restore GFXIP control over CGPG */
2947 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2948 		if (default_data != data)
2949 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2950 	}
2951 }
2952 
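/* If any GFX powergating flavor (PG/SMG/DMG) is enabled, program the PG
 * bring-up parameters: the CP idle poll count, the RLC power up/down and
 * SERDES command delays, the CGCG-before-CGPG delay and the GRBM register
 * save idle threshold, then hand CGPG control to GFXIP where applicable.
 */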
2953 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2954 {
2955 	uint32_t data = 0;
2956 
2957 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2958 			      AMD_PG_SUPPORT_GFX_SMG |
2959 			      AMD_PG_SUPPORT_GFX_DMG)) {
2960 		/* init IDLE_POLL_COUNT = 0x60 */
2961 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2962 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2963 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2964 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2965 
2966 		/* init RLC PG Delay */
2967 		data = 0;
2968 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2969 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2970 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2971 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2972 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2973 
2974 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2975 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2976 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2977 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2978 
2979 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2980 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2981 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2982 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2983 
2984 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2985 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2986 
2987 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2988 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2989 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2990 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))
2991 			pwr_10_0_gfxip_control_over_cgpg(adev, true);
2992 	}
2993 }
2994 
2995 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2996 						bool enable)
2997 {
2998 	uint32_t data = 0;
2999 	uint32_t default_data = 0;
3000 
3001 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3002 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3003 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
3004 			     enable ? 1 : 0);
3005 	if (default_data != data)
3006 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3007 }
3008 
3009 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
3010 						bool enable)
3011 {
3012 	uint32_t data = 0;
3013 	uint32_t default_data = 0;
3014 
3015 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3016 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3017 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
3018 			     enable ? 1 : 0);
3019 	if (default_data != data)
3020 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3021 }
3022 
3023 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
3024 					bool enable)
3025 {
3026 	uint32_t data = 0;
3027 	uint32_t default_data = 0;
3028 
3029 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3030 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3031 			     CP_PG_DISABLE,
3032 			     enable ? 0 : 1);
3033 	if (default_data != data)
3034 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3035 }
3036 
3037 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
3038 						bool enable)
3039 {
3040 	uint32_t data, default_data;
3041 
3042 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3043 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3044 			     GFX_POWER_GATING_ENABLE,
3045 			     enable ? 1 : 0);
3046 	if (default_data != data)
3047 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3048 }
3049 
3050 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
3051 						bool enable)
3052 {
3053 	uint32_t data, default_data;
3054 
3055 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3056 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3057 			     GFX_PIPELINE_PG_ENABLE,
3058 			     enable ? 1 : 0);
3059 	if (default_data != data)
3060 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3061 
3062 	if (!enable)
3063 		/* read any GFX register to wake up GFX */
3064 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3065 }
3066 
3067 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3068 						       bool enable)
3069 {
3070 	uint32_t data, default_data;
3071 
3072 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3073 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3074 			     STATIC_PER_CU_PG_ENABLE,
3075 			     enable ? 1 : 0);
3076 	if (default_data != data)
3077 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3078 }
3079 
3080 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3081 						bool enable)
3082 {
3083 	uint32_t data, default_data;
3084 
3085 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3086 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3087 			     DYN_PER_CU_PG_ENABLE,
3088 			     enable ? 1 : 0);
3089 	if (default_data != data)
3090 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3091 }
3092 
3093 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3094 {
3095 	gfx_v9_0_init_csb(adev);
3096 
3097 	/*
3098 	 * The RLC save/restore list is supported since RLC v2_1
3099 	 * and is required by the gfxoff feature.
3100 	 */
3101 	if (adev->gfx.rlc.is_rlc_v2_1) {
3102 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3103 			    IP_VERSION(9, 2, 1) ||
3104 		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
3105 			gfx_v9_1_init_rlc_save_restore_list(adev);
3106 		gfx_v9_0_enable_save_restore_machine(adev);
3107 	}
3108 
3109 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3110 			      AMD_PG_SUPPORT_GFX_SMG |
3111 			      AMD_PG_SUPPORT_GFX_DMG |
3112 			      AMD_PG_SUPPORT_CP |
3113 			      AMD_PG_SUPPORT_GDS |
3114 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3115 		WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
3116 			     adev->gfx.rlc.cp_table_gpu_addr >> 8);
3117 		gfx_v9_0_init_gfx_power_gating(adev);
3118 	}
3119 }
3120 
3121 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3122 {
3123 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3124 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3125 	gfx_v9_0_wait_for_rlc_serdes(adev);
3126 }
3127 
3128 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3129 {
3130 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3131 	udelay(50);
3132 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3133 	udelay(50);
3134 }
3135 
3136 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3137 {
3138 #ifdef AMDGPU_RLC_DEBUG_RETRY
3139 	u32 rlc_ucode_ver;
3140 #endif
3141 
3142 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3143 	udelay(50);
3144 
3145 	/* APUs (originally Carrizo) enable the CP interrupt only after the CP is initialized */
3146 	if (!(adev->flags & AMD_IS_APU)) {
3147 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3148 		udelay(50);
3149 	}
3150 
3151 #ifdef AMDGPU_RLC_DEBUG_RETRY
3152 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
3153 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3154 	if (rlc_ucode_ver == 0x108) {
3155 		drm_info(adev_to_drm(adev), "Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3156 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
3157 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3158 		 * default is 0x9C4 to create a 100us interval */
3159 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3160 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3161 		 * to disable the page fault retry interrupts, default is
3162 		 * 0x100 (256) */
3163 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3164 	}
3165 #endif
3166 }
3167 
3168 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3169 {
3170 	const struct rlc_firmware_header_v2_0 *hdr;
3171 	const __le32 *fw_data;
3172 	unsigned i, fw_size;
3173 
3174 	if (!adev->gfx.rlc_fw)
3175 		return -EINVAL;
3176 
3177 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3178 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3179 
3180 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3181 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3182 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3183 
3184 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3185 			RLCG_UCODE_LOADING_START_ADDRESS);
3186 	for (i = 0; i < fw_size; i++)
3187 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3188 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3189 
3190 	return 0;
3191 }
3192 
3193 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3194 {
3195 	int r;
3196 
3197 	if (amdgpu_sriov_vf(adev)) {
3198 		gfx_v9_0_init_csb(adev);
3199 		return 0;
3200 	}
3201 
3202 	adev->gfx.rlc.funcs->stop(adev);
3203 
3204 	/* disable CG */
3205 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3206 
3207 	gfx_v9_0_init_pg(adev);
3208 
3209 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3210 		/* legacy rlc firmware loading */
3211 		r = gfx_v9_0_rlc_load_microcode(adev);
3212 		if (r)
3213 			return r;
3214 	}
3215 
3216 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3217 	case IP_VERSION(9, 2, 2):
3218 	case IP_VERSION(9, 1, 0):
3219 		gfx_v9_0_init_lbpw(adev);
3220 		if (amdgpu_lbpw == 0)
3221 			gfx_v9_0_enable_lbpw(adev, false);
3222 		else
3223 			gfx_v9_0_enable_lbpw(adev, true);
3224 		break;
3225 	case IP_VERSION(9, 4, 0):
3226 		gfx_v9_4_init_lbpw(adev);
3227 		if (amdgpu_lbpw > 0)
3228 			gfx_v9_0_enable_lbpw(adev, true);
3229 		else
3230 			gfx_v9_0_enable_lbpw(adev, false);
3231 		break;
3232 	default:
3233 		break;
3234 	}
3235 
3236 	gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
3237 
3238 	adev->gfx.rlc.funcs->start(adev);
3239 
3240 	return 0;
3241 }
3242 
3243 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3244 {
3245 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3246 
3247 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
3248 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
3249 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
3250 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
3251 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
3252 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
3253 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
3254 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
3255 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
3256 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3257 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3258 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3259 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3260 	udelay(50);
3261 }
3262 
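/* Legacy (non-PSP) front-end microcode load: halt the CP, then stream the
 * PFP, CE and ME images one dword at a time through the respective
 * UCODE_ADDR/UCODE_DATA (or RAM_WADDR/RAM_DATA) register pairs, writing
 * the firmware version to the address register afterwards.
 */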
3263 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3264 {
3265 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3266 	const struct gfx_firmware_header_v1_0 *ce_hdr;
3267 	const struct gfx_firmware_header_v1_0 *me_hdr;
3268 	const __le32 *fw_data;
3269 	unsigned i, fw_size;
3270 
3271 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3272 		return -EINVAL;
3273 
3274 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3275 		adev->gfx.pfp_fw->data;
3276 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3277 		adev->gfx.ce_fw->data;
3278 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3279 		adev->gfx.me_fw->data;
3280 
3281 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3282 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3283 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3284 
3285 	gfx_v9_0_cp_gfx_enable(adev, false);
3286 
3287 	/* PFP */
3288 	fw_data = (const __le32 *)
3289 		(adev->gfx.pfp_fw->data +
3290 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3291 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3292 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3293 	for (i = 0; i < fw_size; i++)
3294 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3295 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3296 
3297 	/* CE */
3298 	fw_data = (const __le32 *)
3299 		(adev->gfx.ce_fw->data +
3300 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3301 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3302 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3303 	for (i = 0; i < fw_size; i++)
3304 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3305 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3306 
3307 	/* ME */
3308 	fw_data = (const __le32 *)
3309 		(adev->gfx.me_fw->data +
3310 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3311 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3312 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3313 	for (i = 0; i < fw_size; i++)
3314 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3315 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3316 
3317 	return 0;
3318 }
3319 
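/* Bring up the gfx CP: program the context count, enable the ME/PFP/CE,
 * then submit the clear-state init sequence (PREAMBLE/CONTEXT_CONTROL,
 * the SECT_CONTEXT extents from gfx9_cs_data, CLEAR_STATE, SET_BASE and
 * VGT_INDEX_TYPE) on the first gfx ring.
 */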
3320 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3321 {
3322 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3323 	const struct cs_section_def *sect = NULL;
3324 	const struct cs_extent_def *ext = NULL;
3325 	int r, i, tmp;
3326 
3327 	/* init the CP */
3328 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3329 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3330 
3331 	gfx_v9_0_cp_gfx_enable(adev, true);
3332 
3333 	/* Limit this quirk to the gfx9 APU series; it has been confirmed
3334 	 * that gfx10/gfx11 APUs do not need this update.
3335 	 */
3336 	if (adev->flags & AMD_IS_APU &&
3337 			adev->in_s3 && !pm_resume_via_firmware()) {
3338 		drm_info(adev_to_drm(adev), "Will skip the CSB packet resubmit\n");
3339 		return 0;
3340 	}
3341 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3342 	if (r) {
3343 		drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
3344 		return r;
3345 	}
3346 
3347 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3348 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3349 
3350 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3351 	amdgpu_ring_write(ring, 0x80000000);
3352 	amdgpu_ring_write(ring, 0x80000000);
3353 
3354 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3355 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3356 			if (sect->id == SECT_CONTEXT) {
3357 				amdgpu_ring_write(ring,
3358 				       PACKET3(PACKET3_SET_CONTEXT_REG,
3359 					       ext->reg_count));
3360 				amdgpu_ring_write(ring,
3361 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3362 				for (i = 0; i < ext->reg_count; i++)
3363 					amdgpu_ring_write(ring, ext->extent[i]);
3364 			}
3365 		}
3366 	}
3367 
3368 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3369 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3370 
3371 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3372 	amdgpu_ring_write(ring, 0);
3373 
3374 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3375 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3376 	amdgpu_ring_write(ring, 0x8000);
3377 	amdgpu_ring_write(ring, 0x8000);
3378 
3379 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3380 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3381 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3382 	amdgpu_ring_write(ring, tmp);
3383 	amdgpu_ring_write(ring, 0);
3384 
3385 	amdgpu_ring_commit(ring);
3386 
3387 	return 0;
3388 }
3389 
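/* Program ring buffer 0: write pointer delay and VMID, buffer/block size,
 * rptr/wptr writeback addresses, ring base, and the doorbell offset and
 * range, then kick off gfx_v9_0_cp_gfx_start().
 */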
3390 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3391 {
3392 	struct amdgpu_ring *ring;
3393 	u32 tmp;
3394 	u32 rb_bufsz;
3395 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3396 
3397 	/* Set the write pointer delay */
3398 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3399 
3400 	/* set the RB to use vmid 0 */
3401 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3402 
3403 	/* Set ring buffer size */
3404 	ring = &adev->gfx.gfx_ring[0];
3405 	rb_bufsz = order_base_2(ring->ring_size / 8);
3406 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3407 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3408 #ifdef __BIG_ENDIAN
3409 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3410 #endif
3411 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3412 
3413 	/* Initialize the ring buffer's write pointers */
3414 	ring->wptr = 0;
3415 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3416 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3417 
3418 	/* set the wb address whether it's enabled or not */
3419 	rptr_addr = ring->rptr_gpu_addr;
3420 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3421 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3422 
3423 	wptr_gpu_addr = ring->wptr_gpu_addr;
3424 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3425 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3426 
3427 	mdelay(1);
3428 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3429 
3430 	rb_addr = ring->gpu_addr >> 8;
3431 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3432 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3433 
3434 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3435 	if (ring->use_doorbell) {
3436 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3437 				    DOORBELL_OFFSET, ring->doorbell_index);
3438 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3439 				    DOORBELL_EN, 1);
3440 	} else {
3441 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3442 	}
3443 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3444 
3445 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3446 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
3447 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3448 
3449 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3450 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3451 
3452 
3453 	/* start the ring */
3454 	gfx_v9_0_cp_gfx_start(adev);
3455 
3456 	return 0;
3457 }
3458 
3459 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3460 {
3461 	if (enable) {
3462 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3463 	} else {
3464 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3465 				 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
3466 				  CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
3467 				  CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
3468 				  CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
3469 				  CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
3470 				  CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
3471 				  CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
3472 				  CP_MEC_CNTL__MEC_ME1_HALT_MASK |
3473 				  CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3474 		adev->gfx.kiq[0].ring.sched.ready = false;
3475 	}
3476 	udelay(50);
3477 }
3478 
3479 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3480 {
3481 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3482 	const __le32 *fw_data;
3483 	unsigned i;
3484 	u32 tmp;
3485 
3486 	if (!adev->gfx.mec_fw)
3487 		return -EINVAL;
3488 
3489 	gfx_v9_0_cp_compute_enable(adev, false);
3490 
3491 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3492 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3493 
3494 	fw_data = (const __le32 *)
3495 		(adev->gfx.mec_fw->data +
3496 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3497 	tmp = 0;
3498 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3499 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3500 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3501 
3502 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3503 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3504 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3505 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3506 
3507 	/* MEC1 */
3508 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3509 			 mec_hdr->jt_offset);
3510 	for (i = 0; i < mec_hdr->jt_size; i++)
3511 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3512 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3513 
3514 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3515 			adev->gfx.mec_fw_version);
3516 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3517 
3518 	return 0;
3519 }
3520 
3521 /* KIQ functions */
3522 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3523 {
3524 	uint32_t tmp;
3525 	struct amdgpu_device *adev = ring->adev;
3526 
3527 	/* tell the RLC which queue is the KIQ */
3528 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3529 	tmp &= 0xffffff00;
3530 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3531 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
3532 }
3533 
3534 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3535 {
3536 	struct amdgpu_device *adev = ring->adev;
3537 
3538 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3539 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3540 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3541 			mqd->cp_hqd_queue_priority =
3542 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3543 		}
3544 	}
3545 }
3546 
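/* Fill the v9 MQD (memory queue descriptor) from the ring state: EOP
 * buffer address/size, doorbell control, MQD and HQD base addresses,
 * queue size and rptr/wptr writeback addresses, plus queue priority.
 * Only the KIQ marks itself active here; ordinary queues are activated
 * by the map_queues packet.
 */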
3547 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3548 {
3549 	struct amdgpu_device *adev = ring->adev;
3550 	struct v9_mqd *mqd = ring->mqd_ptr;
3551 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3552 	uint32_t tmp;
3553 
3554 	mqd->header = 0xC0310800;
3555 	mqd->compute_pipelinestat_enable = 0x00000001;
3556 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3557 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3558 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3559 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3560 	mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3561 	mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3562 	mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3563 	mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3564 	mqd->compute_misc_reserved = 0x00000003;
3565 
3566 	mqd->dynamic_cu_mask_addr_lo =
3567 		lower_32_bits(ring->mqd_gpu_addr
3568 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3569 	mqd->dynamic_cu_mask_addr_hi =
3570 		upper_32_bits(ring->mqd_gpu_addr
3571 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3572 
3573 	eop_base_addr = ring->eop_gpu_addr >> 8;
3574 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3575 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3576 
3577 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3578 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3579 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3580 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3581 
3582 	mqd->cp_hqd_eop_control = tmp;
3583 
3584 	/* enable doorbell? */
3585 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3586 
3587 	if (ring->use_doorbell) {
3588 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3589 				    DOORBELL_OFFSET, ring->doorbell_index);
3590 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3591 				    DOORBELL_EN, 1);
3592 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3593 				    DOORBELL_SOURCE, 0);
3594 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3595 				    DOORBELL_HIT, 0);
3596 	} else {
3597 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3598 					 DOORBELL_EN, 0);
3599 	}
3600 
3601 	mqd->cp_hqd_pq_doorbell_control = tmp;
3602 
3603 	/* disable the queue if it's active */
3604 	ring->wptr = 0;
3605 	mqd->cp_hqd_dequeue_request = 0;
3606 	mqd->cp_hqd_pq_rptr = 0;
3607 	mqd->cp_hqd_pq_wptr_lo = 0;
3608 	mqd->cp_hqd_pq_wptr_hi = 0;
3609 
3610 	/* set the pointer to the MQD */
3611 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3612 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3613 
3614 	/* set MQD vmid to 0 */
3615 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3616 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3617 	mqd->cp_mqd_control = tmp;
3618 
3619 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3620 	hqd_gpu_addr = ring->gpu_addr >> 8;
3621 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3622 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3623 
3624 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3625 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3626 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3627 			    (order_base_2(ring->ring_size / 4) - 1));
3628 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3629 			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3630 #ifdef __BIG_ENDIAN
3631 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3632 #endif
3633 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3634 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3635 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3636 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3637 	mqd->cp_hqd_pq_control = tmp;
3638 
3639 	/* set the wb address whether it's enabled or not */
3640 	wb_gpu_addr = ring->rptr_gpu_addr;
3641 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3642 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3643 		upper_32_bits(wb_gpu_addr) & 0xffff;
3644 
3645 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3646 	wb_gpu_addr = ring->wptr_gpu_addr;
3647 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3648 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3649 
3650 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3651 	ring->wptr = 0;
3652 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3653 
3654 	/* set the vmid for the queue */
3655 	mqd->cp_hqd_vmid = 0;
3656 
3657 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3658 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3659 	mqd->cp_hqd_persistent_state = tmp;
3660 
3661 	/* set MIN_IB_AVAIL_SIZE */
3662 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3663 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3664 	mqd->cp_hqd_ib_control = tmp;
3665 
3666 	/* set static priority for a queue/ring */
3667 	gfx_v9_0_mqd_set_priority(ring, mqd);
3668 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3669 
3670 	/* the map_queues packet doesn't need to activate the queue,
3671 	 * so only the KIQ needs to set this field.
3672 	 */
3673 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3674 		mqd->cp_hqd_active = 1;
3675 
3676 	return 0;
3677 }
3678 
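/* Write the MQD fields directly to the HQD registers. This path is used
 * for the KIQ itself, which cannot be mapped via a map_queues packet;
 * the queue is drained first if it is still active.
 */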
3679 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3680 {
3681 	struct amdgpu_device *adev = ring->adev;
3682 	struct v9_mqd *mqd = ring->mqd_ptr;
3683 	int j;
3684 
3685 	/* disable wptr polling */
3686 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3687 
3688 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3689 	       mqd->cp_hqd_eop_base_addr_lo);
3690 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3691 	       mqd->cp_hqd_eop_base_addr_hi);
3692 
3693 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3694 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3695 	       mqd->cp_hqd_eop_control);
3696 
3697 	/* enable doorbell? */
3698 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3699 	       mqd->cp_hqd_pq_doorbell_control);
3700 
3701 	/* disable the queue if it's active */
3702 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3703 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3704 		for (j = 0; j < adev->usec_timeout; j++) {
3705 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3706 				break;
3707 			udelay(1);
3708 		}
3709 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3710 		       mqd->cp_hqd_dequeue_request);
3711 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3712 		       mqd->cp_hqd_pq_rptr);
3713 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3714 		       mqd->cp_hqd_pq_wptr_lo);
3715 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3716 		       mqd->cp_hqd_pq_wptr_hi);
3717 	}
3718 
3719 	/* set the pointer to the MQD */
3720 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3721 	       mqd->cp_mqd_base_addr_lo);
3722 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3723 	       mqd->cp_mqd_base_addr_hi);
3724 
3725 	/* set MQD vmid to 0 */
3726 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3727 	       mqd->cp_mqd_control);
3728 
3729 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3730 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3731 	       mqd->cp_hqd_pq_base_lo);
3732 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3733 	       mqd->cp_hqd_pq_base_hi);
3734 
3735 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3736 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3737 	       mqd->cp_hqd_pq_control);
3738 
3739 	/* set the wb address whether it's enabled or not */
3740 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3741 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3742 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3743 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3744 
3745 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3746 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3747 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3748 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3749 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3750 
3751 	/* enable the doorbell if requested */
3752 	if (ring->use_doorbell) {
3753 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3754 					(adev->doorbell_index.kiq * 2) << 2);
3755 		/* If GC has entered CGPG, ringing a doorbell beyond the first page
3756 		 * doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work
3757 		 * around this issue. This change has to align with the firmware
3758 		 * update.
3759 		 */
3760 		if (check_if_enlarge_doorbell_range(adev))
3761 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3762 					(adev->doorbell.size - 4));
3763 		else
3764 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3765 					(adev->doorbell_index.userqueue_end * 2) << 2);
3766 	}
3767 
3768 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3769 	       mqd->cp_hqd_pq_doorbell_control);
3770 
3771 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3772 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3773 	       mqd->cp_hqd_pq_wptr_lo);
3774 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3775 	       mqd->cp_hqd_pq_wptr_hi);
3776 
3777 	/* set the vmid for the queue */
3778 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3779 
3780 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3781 	       mqd->cp_hqd_persistent_state);
3782 
3783 	/* activate the queue */
3784 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3785 	       mqd->cp_hqd_active);
3786 
3787 	if (ring->use_doorbell)
3788 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3789 
3790 	return 0;
3791 }
3792 
3793 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3794 {
3795 	struct amdgpu_device *adev = ring->adev;
3796 	int j;
3797 
3798 	/* disable the queue if it's active */
3799 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3800 
3801 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3802 
3803 		for (j = 0; j < adev->usec_timeout; j++) {
3804 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3805 				break;
3806 			udelay(1);
3807 		}
3808 
3809 		if (j == adev->usec_timeout) {
3810 			DRM_DEBUG("KIQ dequeue request failed.\n");
3811 
3812 			/* Manual disable if dequeue request times out */
3813 			WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3814 		}
3815 
3816 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3817 		      0);
3818 	}
3819 
3820 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3821 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3822 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3823 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3824 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3825 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3826 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3827 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3828 
3829 	return 0;
3830 }
3831 
3832 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3833 {
3834 	struct amdgpu_device *adev = ring->adev;
3835 	struct v9_mqd *mqd = ring->mqd_ptr;
3836 	struct v9_mqd *tmp_mqd;
3837 
3838 	gfx_v9_0_kiq_setting(ring);
3839 
3840 	/* The GPU could be in a bad state during probe: the driver triggers a
3841 	 * reset after loading the SMU, and in that case the MQD has not been
3842 	 * initialized, so the driver needs to re-init it. Check
3843 	 * mqd->cp_hqd_pq_control, since this value should not be 0.
3844 	 */
3845 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
3846 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
3847 		/* for GPU_RESET case , reset MQD to a clean status */
3848 		if (adev->gfx.kiq[0].mqd_backup)
3849 			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
3850 
3851 		/* reset ring buffer */
3852 		ring->wptr = 0;
3853 		amdgpu_ring_clear_ring(ring);
3854 
3855 		mutex_lock(&adev->srbm_mutex);
3856 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3857 		gfx_v9_0_kiq_init_register(ring);
3858 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3859 		mutex_unlock(&adev->srbm_mutex);
3860 	} else {
3861 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3862 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3863 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3864 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3865 			amdgpu_ring_clear_ring(ring);
3866 		mutex_lock(&adev->srbm_mutex);
3867 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3868 		gfx_v9_0_mqd_init(ring);
3869 		gfx_v9_0_kiq_init_register(ring);
3870 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3871 		mutex_unlock(&adev->srbm_mutex);
3872 
3873 		if (adev->gfx.kiq[0].mqd_backup)
3874 			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
3875 	}
3876 
3877 	return 0;
3878 }
3879 
3880 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
3881 {
3882 	struct amdgpu_device *adev = ring->adev;
3883 	struct v9_mqd *mqd = ring->mqd_ptr;
3884 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3885 	struct v9_mqd *tmp_mqd;
3886 
3887 	/* Same as the KIQ init above: the driver needs to re-init the MQD if
3888 	 * mqd->cp_hqd_pq_control was never initialized.
3889 	 */
3890 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3891 
3892 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
3893 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
3894 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3895 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3896 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3897 		mutex_lock(&adev->srbm_mutex);
3898 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3899 		gfx_v9_0_mqd_init(ring);
3900 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3901 		mutex_unlock(&adev->srbm_mutex);
3902 
3903 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3904 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3905 	} else {
3906 		/* restore MQD to a clean status */
3907 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3908 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3909 		/* reset ring buffer */
3910 		ring->wptr = 0;
3911 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3912 		amdgpu_ring_clear_ring(ring);
3913 	}
3914 
3915 	return 0;
3916 }
3917 
3918 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3919 {
3920 	gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
3921 	return 0;
3922 }
3923 
3924 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3925 {
3926 	int i, r;
3927 
3928 	gfx_v9_0_cp_compute_enable(adev, true);
3929 
3930 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3931 		r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
3932 		if (r)
3933 			return r;
3934 	}
3935 
3936 	return amdgpu_gfx_enable_kcq(adev, 0);
3937 }
3938 
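/* Overall CP bring-up order: optionally load gfx/compute microcode
 * (legacy path), halt both CPs, resume the KIQ first (it maps the other
 * queues), then the gfx ring and the KCQs, and finish with ring tests.
 */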
3939 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3940 {
3941 	int r, i;
3942 	struct amdgpu_ring *ring;
3943 
3944 	if (!(adev->flags & AMD_IS_APU))
3945 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3946 
3947 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3948 		if (adev->gfx.num_gfx_rings) {
3949 			/* legacy firmware loading */
3950 			r = gfx_v9_0_cp_gfx_load_microcode(adev);
3951 			if (r)
3952 				return r;
3953 		}
3954 
3955 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3956 		if (r)
3957 			return r;
3958 	}
3959 
3960 	if (adev->gfx.num_gfx_rings)
3961 		gfx_v9_0_cp_gfx_enable(adev, false);
3962 	gfx_v9_0_cp_compute_enable(adev, false);
3963 
3964 	r = gfx_v9_0_kiq_resume(adev);
3965 	if (r)
3966 		return r;
3967 
3968 	if (adev->gfx.num_gfx_rings) {
3969 		r = gfx_v9_0_cp_gfx_resume(adev);
3970 		if (r)
3971 			return r;
3972 	}
3973 
3974 	r = gfx_v9_0_kcq_resume(adev);
3975 	if (r)
3976 		return r;
3977 
3978 	if (adev->gfx.num_gfx_rings) {
3979 		ring = &adev->gfx.gfx_ring[0];
3980 		r = amdgpu_ring_test_helper(ring);
3981 		if (r)
3982 			return r;
3983 	}
3984 
3985 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3986 		ring = &adev->gfx.compute_ring[i];
3987 		amdgpu_ring_test_helper(ring);
3988 	}
3989 
3990 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3991 
3992 	return 0;
3993 }
3994 
3995 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3996 {
3997 	u32 tmp;
3998 
3999 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&
4000 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
4001 		return;
4002 
4003 	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
4004 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
4005 				adev->df.hash_status.hash_64k);
4006 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
4007 				adev->df.hash_status.hash_2m);
4008 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
4009 				adev->df.hash_status.hash_1g);
4010 	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
4011 }
4012 
4013 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
4014 {
4015 	if (adev->gfx.num_gfx_rings)
4016 		gfx_v9_0_cp_gfx_enable(adev, enable);
4017 	gfx_v9_0_cp_compute_enable(adev, enable);
4018 }
4019 
4020 static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
4021 {
4022 	int r;
4023 	struct amdgpu_device *adev = ip_block->adev;
4024 
4025 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4026 				       adev->gfx.cleaner_shader_ptr);
4027 
4028 	if (!amdgpu_sriov_vf(adev))
4029 		gfx_v9_0_init_golden_registers(adev);
4030 
4031 	gfx_v9_0_constants_init(adev);
4032 
4033 	gfx_v9_0_init_tcp_config(adev);
4034 
4035 	r = adev->gfx.rlc.funcs->resume(adev);
4036 	if (r)
4037 		return r;
4038 
4039 	r = gfx_v9_0_cp_resume(adev);
4040 	if (r)
4041 		return r;
4042 
4043 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
4044 	    !amdgpu_sriov_vf(adev))
4045 		gfx_v9_4_2_set_power_brake_sequence(adev);
4046 
4047 	return r;
4048 }
4049 
4050 static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
4051 {
4052 	struct amdgpu_device *adev = ip_block->adev;
4053 
4054 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4055 		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4056 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4057 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4058 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4059 
4060 	/* With a RAS fatal error triggered, the DF is frozen and KCQ disable would fail */
4061 	if (!amdgpu_ras_intr_triggered())
4062 		/* disable KCQ to avoid CPC touch memory not valid anymore */
4063 		amdgpu_gfx_disable_kcq(adev, 0);
4064 
4065 	if (amdgpu_sriov_vf(adev)) {
4066 		gfx_v9_0_cp_gfx_enable(adev, false);
4067 		/* Must disable polling for SRIOV when hw teardown is finished,
4068 		 * otherwise the CPC engine may keep fetching the WB address, which
4069 		 * is already invalid after sw teardown, and trigger a DMAR read
4070 		 * error on the hypervisor side.
4071 		 */
4072 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4073 		return 0;
4074 	}
4075 
4076 	/* Use the deinitialize sequence from CAIL when unbinding the device
4077 	 * from the driver, otherwise the KIQ hangs when binding back.
4078 	 */
4079 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4080 		mutex_lock(&adev->srbm_mutex);
4081 		soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
4082 				adev->gfx.kiq[0].ring.pipe,
4083 				adev->gfx.kiq[0].ring.queue, 0, 0);
4084 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
4085 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4086 		mutex_unlock(&adev->srbm_mutex);
4087 	}
4088 
4089 	gfx_v9_0_cp_enable(adev, false);
4090 
4091 	/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
4092 	if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
4093 	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {
4094 		dev_dbg(adev->dev, "Skipping RLC halt\n");
4095 		return 0;
4096 	}
4097 
4098 	adev->gfx.rlc.funcs->stop(adev);
4099 	return 0;
4100 }
4101 
4102 static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
4103 {
4104 	return gfx_v9_0_hw_fini(ip_block);
4105 }
4106 
4107 static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
4108 {
4109 	return gfx_v9_0_hw_init(ip_block);
4110 }
4111 
4112 static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
4113 {
4114 	struct amdgpu_device *adev = ip_block->adev;
4115 
4116 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4117 				GRBM_STATUS, GUI_ACTIVE))
4118 		return false;
4119 	else
4120 		return true;
4121 }
4122 
4123 static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4124 {
4125 	unsigned i;
4126 	struct amdgpu_device *adev = ip_block->adev;
4127 
4128 	for (i = 0; i < adev->usec_timeout; i++) {
4129 		if (gfx_v9_0_is_idle(ip_block))
4130 			return 0;
4131 		udelay(1);
4132 	}
4133 	return -ETIMEDOUT;
4134 }
4135 
4136 static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
4137 {
4138 	u32 grbm_soft_reset = 0;
4139 	u32 tmp;
4140 	struct amdgpu_device *adev = ip_block->adev;
4141 
4142 	/* GRBM_STATUS */
4143 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4144 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4145 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4146 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4147 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4148 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4149 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4150 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4151 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4152 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4153 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4154 	}
4155 
4156 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4157 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4158 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4159 	}
4160 
4161 	/* GRBM_STATUS2 */
4162 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4163 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4164 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4165 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4166 
4167 
4168 	if (grbm_soft_reset) {
4169 		/* stop the rlc */
4170 		adev->gfx.rlc.funcs->stop(adev);
4171 
4172 		if (adev->gfx.num_gfx_rings)
4173 			/* Disable GFX parsing/prefetching */
4174 			gfx_v9_0_cp_gfx_enable(adev, false);
4175 
4176 		/* Disable MEC parsing/prefetching */
4177 		gfx_v9_0_cp_compute_enable(adev, false);
4178 
4179 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4180 		tmp |= grbm_soft_reset;
4181 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4182 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4183 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4184 
4185 		udelay(50);
4186 
4187 		tmp &= ~grbm_soft_reset;
4188 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4189 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4190 
4191 		/* Wait a little for things to settle down */
4192 		udelay(50);
4193 	}
4194 	return 0;
4195 }
4196 
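/* Read the GPU clock counter through the KIQ: emit a COPY_DATA packet
 * that copies the register pair into a writeback slot, fence-poll for
 * completion, and give up early during GPU reset or after
 * MAX_KIQ_REG_TRY attempts. Used when direct register access is not
 * reliable (e.g. SR-IOV runtime).
 */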
4197 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4198 {
4199 	signed long r, cnt = 0;
4200 	unsigned long flags;
4201 	uint32_t seq, reg_val_offs = 0;
4202 	uint64_t value = 0;
4203 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4204 	struct amdgpu_ring *ring = &kiq->ring;
4205 
4206 	BUG_ON(!ring->funcs->emit_rreg);
4207 
4208 	spin_lock_irqsave(&kiq->ring_lock, flags);
4209 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4210 		pr_err("critical bug! too many kiq readers\n");
4211 		goto failed_unlock;
4212 	}
4213 	amdgpu_ring_alloc(ring, 32);
4214 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4215 	amdgpu_ring_write(ring, 9 |	/* src: register */
4216 				(5 << 8) |	/* dst: memory */
4217 				(1 << 16) |	/* count sel */
4218 				(1 << 20));	/* write confirm */
4219 	amdgpu_ring_write(ring, 0);
4220 	amdgpu_ring_write(ring, 0);
4221 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4222 				reg_val_offs * 4));
4223 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4224 				reg_val_offs * 4));
4225 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4226 	if (r)
4227 		goto failed_undo;
4228 
4229 	amdgpu_ring_commit(ring);
4230 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4231 
4232 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4233 
4234 	/* Don't keep waiting in the GPU reset case, because doing so may
4235 	 * block the gpu_recover() routine forever; e.g. if this virt_kiq_rreg
4236 	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
4237 	 * return while we keep waiting here, which causes gpu_recover() to
4238 	 * hang.
4239 	 *
4240 	 * Also don't keep waiting when called from IRQ context.
4241 	 */
4242 	if (r < 1 && (amdgpu_in_reset(adev)))
4243 		goto failed_kiq_read;
4244 
4245 	might_sleep();
4246 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4247 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4248 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4249 	}
4250 
4251 	if (cnt > MAX_KIQ_REG_TRY)
4252 		goto failed_kiq_read;
4253 
4254 	mb();
4255 	value = (uint64_t)adev->wb.wb[reg_val_offs] |
4256 		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4257 	amdgpu_device_wb_free(adev, reg_val_offs);
4258 	return value;
4259 
4260 failed_undo:
4261 	amdgpu_ring_undo(ring);
4262 failed_unlock:
4263 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4264 failed_kiq_read:
4265 	if (reg_val_offs)
4266 		amdgpu_device_wb_free(adev, reg_val_offs);
4267 	pr_err("failed to read gpu clock\n");
4268 	return ~0;
4269 }
4270 
4271 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4272 {
4273 	uint64_t clock, clock_lo, clock_hi, hi_check;
4274 
4275 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4276 	case IP_VERSION(9, 3, 0):
4277 		preempt_disable();
4278 		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4279 		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4280 		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4281 		/* The SMUIO TSC runs at 100MHz, so the 32-bit low word carries
4282 		 * over roughly every 42 seconds (2^32 / 100MHz).
4283 		 */
4284 		if (hi_check != clock_hi) {
4285 			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4286 			clock_hi = hi_check;
4287 		}
4288 		preempt_enable();
4289 		clock = clock_lo | (clock_hi << 32ULL);
4290 		break;
4291 	default:
4292 		amdgpu_gfx_off_ctrl(adev, false);
4293 		mutex_lock(&adev->gfx.gpu_clock_mutex);
4294 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
4295 			    IP_VERSION(9, 0, 1) &&
4296 		    amdgpu_sriov_runtime(adev)) {
4297 			clock = gfx_v9_0_kiq_read_clock(adev);
4298 		} else {
4299 			WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4300 			clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4301 				((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4302 		}
4303 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
4304 		amdgpu_gfx_off_ctrl(adev, true);
4305 		break;
4306 	}
4307 	return clock;
4308 }
4309 
4310 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4311 					  uint32_t vmid,
4312 					  uint32_t gds_base, uint32_t gds_size,
4313 					  uint32_t gws_base, uint32_t gws_size,
4314 					  uint32_t oa_base, uint32_t oa_size)
4315 {
4316 	struct amdgpu_device *adev = ring->adev;
4317 
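	/* The per-VMID GDS BASE/SIZE registers are interleaved, hence the
	 * stride of 2 below; GWS and OA use one register per VMID.
	 */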
4318 	/* GDS Base */
4319 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4320 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4321 				   gds_base);
4322 
4323 	/* GDS Size */
4324 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4325 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4326 				   gds_size);
4327 
4328 	/* GWS */
4329 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4330 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4331 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4332 
4333 	/* OA */
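	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at bit oa_base.
	 */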
4334 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4335 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4336 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
4337 }
4338 
4339 static const u32 vgpr_init_compute_shader[] =
4340 {
4341 	0xb07c0000, 0xbe8000ff,
4342 	0x000000f8, 0xbf110800,
4343 	0x7e000280, 0x7e020280,
4344 	0x7e040280, 0x7e060280,
4345 	0x7e080280, 0x7e0a0280,
4346 	0x7e0c0280, 0x7e0e0280,
4347 	0x80808800, 0xbe803200,
4348 	0xbf84fff5, 0xbf9c0000,
4349 	0xd28c0001, 0x0001007f,
4350 	0xd28d0001, 0x0002027e,
4351 	0x10020288, 0xb8810904,
4352 	0xb7814000, 0xd1196a01,
4353 	0x00000301, 0xbe800087,
4354 	0xbefc00c1, 0xd89c4000,
4355 	0x00020201, 0xd89cc080,
4356 	0x00040401, 0x320202ff,
4357 	0x00000800, 0x80808100,
4358 	0xbf84fff8, 0x7e020280,
4359 	0xbf810000, 0x00000000,
4360 };
4361 
4362 static const u32 sgpr_init_compute_shader[] =
4363 {
4364 	0xb07c0000, 0xbe8000ff,
4365 	0x0000005f, 0xbee50080,
4366 	0xbe812c65, 0xbe822c65,
4367 	0xbe832c65, 0xbe842c65,
4368 	0xbe852c65, 0xb77c0005,
4369 	0x80808500, 0xbf84fff8,
4370 	0xbe800080, 0xbf810000,
4371 };
4372 
4373 static const u32 vgpr_init_compute_shader_arcturus[] = {
4374 	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4375 	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4376 	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4377 	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4378 	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4379 	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4380 	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4381 	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4382 	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4383 	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4384 	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4385 	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4386 	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4387 	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4388 	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4389 	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4390 	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4391 	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4392 	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4393 	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4394 	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4395 	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4396 	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4397 	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4398 	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4399 	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4400 	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4401 	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4402 	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4403 	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4404 	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4405 	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4406 	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4407 	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4408 	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4409 	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4410 	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4411 	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4412 	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4413 	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4414 	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4415 	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4416 	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4417 	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4418 	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4419 	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4420 	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4421 	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4422 	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4423 	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4424 	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4425 	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4426 	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4427 	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4428 	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4429 	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4430 	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4431 	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4432 	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4433 	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4434 	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4435 	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4436 	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4437 	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4438 	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4439 	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4440 	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4441 	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4442 	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4443 	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4444 	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4445 	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4446 	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4447 	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4448 	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4449 	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4450 	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4451 	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4452 	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4453 	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4454 	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4455 	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4456 	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4457 	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4458 	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4459 	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4460 	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4461 	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4462 	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4463 	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4464 	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4465 	0xbf84fff8, 0xbf810000,
4466 };
4467 
/* When the register arrays below are changed, please update gpr_reg_size
 * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds so that
 * all gfx9 ASICs stay covered.
 */
4471 static const struct soc15_reg_entry vgpr_init_regs[] = {
4472    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4473    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4474    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4475    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4476    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4477    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4478    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4479    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4480    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4481    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4482    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4483    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4484    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4485    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4486 };
4487 
4488 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4489    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4490    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4491    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4492    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4493    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4494    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4495    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4496    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4497    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4498    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4499    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4500    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4501    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4502    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4503 };
4504 
4505 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4506    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4507    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4508    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4509    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4510    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4511    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4512    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4513    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4514    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4515    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4516    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4517    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4518    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4519    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4520 };
4521 
4522 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4523    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4524    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4525    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4526    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4527    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4528    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4529    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4530    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4531    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4532    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4533    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4534    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4535    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4536    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4537 };
4538 
4539 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4540    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4541    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4542    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4543    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4544    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4545    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4546    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4547    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4548    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4549    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4550    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4551    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4552    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4553    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4554    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4555    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4556    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4557    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4558    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4559    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4560    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4561    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4562    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4563    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4564    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4565    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4566    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4567    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4568    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4569    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4570    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4571    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4572    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4573 };
4574 
4575 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4576 {
4577 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4578 	int i, r;
4579 
4580 	/* only support when RAS is enabled */
4581 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4582 		return 0;
4583 
4584 	r = amdgpu_ring_alloc(ring, 7);
4585 	if (r) {
4586 		drm_err(adev_to_drm(adev), "GDS workarounds failed to lock ring %s (%d).\n",
4587 			ring->name, r);
4588 		return r;
4589 	}
4590 
4591 	WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4592 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4593 
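	/* Zero-fill the whole of GDS via CPDMA: dst_sel 1 selects GDS,
	 * src_sel 2 takes the fill value from the data dword (0 here), and
	 * the command dword below carries the byte count.
	 */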
4594 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4595 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4596 				PACKET3_DMA_DATA_DST_SEL(1) |
4597 				PACKET3_DMA_DATA_SRC_SEL(2) |
4598 				PACKET3_DMA_DATA_ENGINE(0)));
4599 	amdgpu_ring_write(ring, 0);
4600 	amdgpu_ring_write(ring, 0);
4601 	amdgpu_ring_write(ring, 0);
4602 	amdgpu_ring_write(ring, 0);
4603 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4604 				adev->gds.gds_size);
4605 
4606 	amdgpu_ring_commit(ring);
4607 
4608 	for (i = 0; i < adev->usec_timeout; i++) {
4609 		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4610 			break;
4611 		udelay(1);
4612 	}
4613 
4614 	if (i >= adev->usec_timeout)
4615 		r = -ETIMEDOUT;
4616 
4617 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4618 
4619 	return r;
4620 }
4621 
4622 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4623 {
4624 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4625 	struct amdgpu_ib ib;
4626 	struct dma_fence *f = NULL;
4627 	int r, i;
4628 	unsigned total_size, vgpr_offset, sgpr_offset;
4629 	u64 gpu_addr;
4630 
4631 	int compute_dim_x = adev->gfx.config.max_shader_engines *
4632 						adev->gfx.config.max_cu_per_sh *
4633 						adev->gfx.config.max_sh_per_se;
4634 	int sgpr_work_group_size = 5;
4635 	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4636 	int vgpr_init_shader_size;
4637 	const u32 *vgpr_init_shader_ptr;
4638 	const struct soc15_reg_entry *vgpr_init_regs_ptr;
4639 
4640 	/* only support when RAS is enabled */
4641 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4642 		return 0;
4643 
4644 	/* bail if the compute ring is not ready */
4645 	if (!ring->sched.ready)
4646 		return 0;
4647 
4648 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
4649 		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4650 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4651 		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4652 	} else {
4653 		vgpr_init_shader_ptr = vgpr_init_compute_shader;
4654 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4655 		vgpr_init_regs_ptr = vgpr_init_regs;
4656 	}
4657 
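	/* Per dispatch, in dwords: 3 for each SET_SH_REG register write,
	 * 4 for the COMPUTE_PGM_LO/HI write, 5 for DISPATCH_DIRECT and 2
	 * for the CS partial flush event; "* 4" converts dwords to bytes.
	 */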
4658 	total_size =
4659 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4660 	total_size +=
4661 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4662 	total_size +=
4663 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4664 	total_size = ALIGN(total_size, 256);
4665 	vgpr_offset = total_size;
4666 	total_size += ALIGN(vgpr_init_shader_size, 256);
4667 	sgpr_offset = total_size;
4668 	total_size += sizeof(sgpr_init_compute_shader);
4669 
4670 	/* allocate an indirect buffer to put the commands in */
4671 	memset(&ib, 0, sizeof(ib));
4672 	r = amdgpu_ib_get(adev, NULL, total_size,
4673 					AMDGPU_IB_POOL_DIRECT, &ib);
4674 	if (r) {
4675 		drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r);
4676 		return r;
4677 	}
4678 
4679 	/* load the compute shaders */
4680 	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4681 		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4682 
4683 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4684 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4685 
4686 	/* init the ib length to 0 */
4687 	ib.length_dw = 0;
4688 
4689 	/* VGPR */
4690 	/* write the register state for the compute dispatch */
4691 	for (i = 0; i < gpr_reg_size; i++) {
4692 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4693 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4694 								- PACKET3_SET_SH_REG_START;
4695 		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4696 	}
4697 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4698 	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4699 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4700 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4701 							- PACKET3_SET_SH_REG_START;
4702 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4703 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4704 
4705 	/* write dispatch packet */
4706 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4707 	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4708 	ib.ptr[ib.length_dw++] = 1; /* y */
4709 	ib.ptr[ib.length_dw++] = 1; /* z */
4710 	ib.ptr[ib.length_dw++] =
4711 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4712 
4713 	/* write CS partial flush packet */
4714 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4715 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4716 
4717 	/* SGPR1 */
4718 	/* write the register state for the compute dispatch */
4719 	for (i = 0; i < gpr_reg_size; i++) {
4720 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4721 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4722 								- PACKET3_SET_SH_REG_START;
4723 		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4724 	}
4725 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4726 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4727 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4728 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4729 							- PACKET3_SET_SH_REG_START;
4730 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4731 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4732 
4733 	/* write dispatch packet */
4734 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4735 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4736 	ib.ptr[ib.length_dw++] = 1; /* y */
4737 	ib.ptr[ib.length_dw++] = 1; /* z */
4738 	ib.ptr[ib.length_dw++] =
4739 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4740 
4741 	/* write CS partial flush packet */
4742 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4743 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4744 
4745 	/* SGPR2 */
4746 	/* write the register state for the compute dispatch */
4747 	for (i = 0; i < gpr_reg_size; i++) {
4748 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4749 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4750 								- PACKET3_SET_SH_REG_START;
4751 		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4752 	}
4753 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4754 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4755 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4756 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4757 							- PACKET3_SET_SH_REG_START;
4758 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4759 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4760 
4761 	/* write dispatch packet */
4762 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4763 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4764 	ib.ptr[ib.length_dw++] = 1; /* y */
4765 	ib.ptr[ib.length_dw++] = 1; /* z */
4766 	ib.ptr[ib.length_dw++] =
4767 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4768 
4769 	/* write CS partial flush packet */
4770 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4771 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4772 
	/* schedule the ib on the ring */
4774 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4775 	if (r) {
4776 		drm_err(adev_to_drm(adev), "ib schedule failed (%d).\n", r);
4777 		goto fail;
4778 	}
4779 
4780 	/* wait for the GPU to finish processing the IB */
4781 	r = dma_fence_wait(f, false);
4782 	if (r) {
4783 		drm_err(adev_to_drm(adev), "fence wait failed (%d).\n", r);
4784 		goto fail;
4785 	}
4786 
4787 fail:
4788 	amdgpu_ib_free(&ib, NULL);
4789 	dma_fence_put(f);
4790 
4791 	return r;
4792 }
4793 
4794 static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
4795 {
4796 	struct amdgpu_device *adev = ip_block->adev;
4797 
4798 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
4799 
4800 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
4801 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4802 		adev->gfx.num_gfx_rings = 0;
4803 	else
4804 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4805 	adev->gfx.xcc_mask = 1;
4806 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4807 					  AMDGPU_MAX_COMPUTE_RINGS);
4808 	gfx_v9_0_set_kiq_pm4_funcs(adev);
4809 	gfx_v9_0_set_ring_funcs(adev);
4810 	gfx_v9_0_set_irq_funcs(adev);
4811 	gfx_v9_0_set_gds_init(adev);
4812 	gfx_v9_0_set_rlc_funcs(adev);
4813 
4814 	/* init rlcg reg access ctrl */
4815 	gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
4816 
4817 	return gfx_v9_0_init_microcode(adev);
4818 }
4819 
4820 static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
4821 {
4822 	struct amdgpu_device *adev = ip_block->adev;
4823 	int r;
4824 
	/*
	 * Temporary workaround: on several cards, CP firmware fails to
	 * update the read pointer while CPDMA is writing the clearing
	 * operation to GDS during the suspend/resume sequence, so limit
	 * this operation to the cold boot sequence.
	 */
4831 	if ((!adev->in_suspend) &&
4832 	    (adev->gds.gds_size)) {
4833 		r = gfx_v9_0_do_edc_gds_workarounds(adev);
4834 		if (r)
4835 			return r;
4836 	}
4837 
4838 	/* requires IBs so do in late init after IB pool is initialized */
4839 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4840 		r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4841 	else
4842 		r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4843 
4844 	if (r)
4845 		return r;
4846 
4847 	if (adev->gfx.ras &&
4848 	    adev->gfx.ras->enable_watchdog_timer)
4849 		adev->gfx.ras->enable_watchdog_timer(adev);
4850 
4851 	return 0;
4852 }
4853 
4854 static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
4855 {
4856 	struct amdgpu_device *adev = ip_block->adev;
4857 	int r;
4858 
4859 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4860 	if (r)
4861 		return r;
4862 
4863 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4864 	if (r)
4865 		return r;
4866 
4867 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
4868 	if (r)
4869 		return r;
4870 
4871 	r = gfx_v9_0_ecc_late_init(ip_block);
4872 	if (r)
4873 		return r;
4874 
4875 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4876 		gfx_v9_4_2_debug_trap_config_init(adev,
4877 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4878 	else
4879 		gfx_v9_0_debug_trap_config_init(adev,
4880 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4881 
4882 	return 0;
4883 }
4884 
4885 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4886 {
4887 	uint32_t rlc_setting;
4888 
4889 	/* if RLC is not enabled, do nothing */
4890 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4891 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4892 		return false;
4893 
4894 	return true;
4895 }
4896 
4897 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4898 {
4899 	uint32_t data;
4900 	unsigned i;
4901 
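	/* MESSAGE 1 asks the RLC to enter safe mode; the RLC acknowledges
	 * by clearing the CMD bit, which the loop below polls for.
	 */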
4902 	data = RLC_SAFE_MODE__CMD_MASK;
4903 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4904 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4905 
4906 	/* wait for RLC_SAFE_MODE */
4907 	for (i = 0; i < adev->usec_timeout; i++) {
4908 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4909 			break;
4910 		udelay(1);
4911 	}
4912 }
4913 
4914 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4915 {
4916 	uint32_t data;
4917 
4918 	data = RLC_SAFE_MODE__CMD_MASK;
4919 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4920 }
4921 
4922 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4923 						bool enable)
4924 {
4925 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4926 
4927 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4928 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4929 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4930 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4931 	} else {
4932 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4933 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4934 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4935 	}
4936 
4937 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4938 }
4939 
4940 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4941 						bool enable)
4942 {
4943 	/* TODO: double check if we need to perform under safe mode */
4944 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
4945 
4946 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4947 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4948 	else
4949 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4950 
4951 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4952 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4953 	else
4954 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4955 
4956 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
4957 }
4958 
4959 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4960 						      bool enable)
4961 {
4962 	uint32_t data, def;
4963 
4964 	/* It is disabled by HW by default */
4965 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4966 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4967 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4968 
4969 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
4970 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4971 
4972 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4973 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4974 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4975 
4976 		/* only for Vega10 & Raven1 */
4977 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4978 
4979 		if (def != data)
4980 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4981 
4982 		/* MGLS is a global flag to control all MGLS in GFX */
4983 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4984 			/* 2 - RLC memory Light sleep */
4985 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4986 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4987 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4988 				if (def != data)
4989 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4990 			}
4991 			/* 3 - CP memory Light sleep */
4992 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4993 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4994 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4995 				if (def != data)
4996 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4997 			}
4998 		}
4999 	} else {
5000 		/* 1 - MGCG_OVERRIDE */
5001 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5002 
5003 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
5004 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
5005 
5006 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5007 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5008 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
5009 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
5010 
5011 		if (def != data)
5012 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5013 
5014 		/* 2 - disable MGLS in RLC */
5015 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
5016 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5017 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5018 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
5019 		}
5020 
5021 		/* 3 - disable MGLS in CP */
5022 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
5023 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5024 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5025 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
5026 		}
5027 	}
5028 }
5029 
5030 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
5031 					   bool enable)
5032 {
5033 	uint32_t data, def;
5034 
5035 	if (!adev->gfx.num_gfx_rings)
5036 		return;
5037 
5038 	/* Enable 3D CGCG/CGLS */
5039 	if (enable) {
5040 		/* write cmd to clear cgcg/cgls ov */
5041 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5042 		/* unset CGCG override */
5043 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5044 		/* update CGCG and CGLS override bits */
5045 		if (def != data)
5046 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5047 
5048 		/* enable 3Dcgcg FSM(0x0000363f) */
5049 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5050 
5051 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5052 			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5053 				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5054 		else
5055 			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
5056 
5057 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5058 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5059 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5060 		if (def != data)
5061 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5062 
5063 		/* set IDLE_POLL_COUNT(0x00900100) */
5064 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5065 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5066 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5067 		if (def != data)
5068 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5069 	} else {
5070 		/* Disable CGCG/CGLS */
5071 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5072 		/* disable cgcg, cgls should be disabled */
5073 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
5074 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
5075 		/* disable cgcg and cgls in FSM */
5076 		if (def != data)
5077 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5078 	}
5079 }
5080 
5081 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5082 						      bool enable)
5083 {
5084 	uint32_t def, data;
5085 
5086 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5087 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5088 		/* unset CGCG override */
5089 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5090 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5091 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5092 		else
5093 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5094 		/* update CGCG and CGLS override bits */
5095 		if (def != data)
5096 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5097 
5098 		/* enable cgcg FSM(0x0000363F) */
5099 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5100 
5101 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
5102 			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5103 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5104 		else
5105 			data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5106 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5107 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5108 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5109 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5110 		if (def != data)
5111 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5112 
5113 		/* set IDLE_POLL_COUNT(0x00900100) */
5114 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5115 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5116 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5117 		if (def != data)
5118 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5119 	} else {
5120 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5121 		/* reset CGCG/CGLS bits */
5122 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5123 		/* disable cgcg and cgls in FSM */
5124 		if (def != data)
5125 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5126 	}
5127 }
5128 
5129 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5130 					    bool enable)
5131 {
5132 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5133 	if (enable) {
5134 		/* CGCG/CGLS should be enabled after MGCG/MGLS
5135 		 * ===  MGCG + MGLS ===
5136 		 */
5137 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5138 		/* ===  CGCG /CGLS for GFX 3D Only === */
5139 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5140 		/* ===  CGCG + CGLS === */
5141 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5142 	} else {
5143 		/* CGCG/CGLS should be disabled before MGCG/MGLS
5144 		 * ===  CGCG + CGLS ===
5145 		 */
5146 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5147 		/* ===  CGCG /CGLS for GFX 3D Only === */
5148 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5149 		/* ===  MGCG + MGLS === */
5150 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5151 	}
5152 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5153 	return 0;
5154 }
5155 
5156 static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
5157 					      unsigned int vmid)
5158 {
5159 	u32 reg, data;
5160 
5161 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5162 	if (amdgpu_sriov_is_pp_one_vf(adev))
5163 		data = RREG32_NO_KIQ(reg);
5164 	else
5165 		data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
5166 
5167 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5168 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5169 
5170 	if (amdgpu_sriov_is_pp_one_vf(adev))
5171 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5172 	else
5173 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5174 }
5175 
5176 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
5177 		struct amdgpu_ring *ring, unsigned int vmid)
5178 {
5179 	amdgpu_gfx_off_ctrl(adev, false);
5180 
5181 	gfx_v9_0_update_spm_vmid_internal(adev, vmid);
5182 
5183 	amdgpu_gfx_off_ctrl(adev, true);
5184 }
5185 
5186 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5187 					uint32_t offset,
5188 					struct soc15_reg_rlcg *entries, int arr_size)
5189 {
5190 	int i;
5191 	uint32_t reg;
5192 
5193 	if (!entries)
5194 		return false;
5195 
5196 	for (i = 0; i < arr_size; i++) {
5197 		const struct soc15_reg_rlcg *entry;
5198 
5199 		entry = &entries[i];
5200 		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5201 		if (offset == reg)
5202 			return true;
5203 	}
5204 
5205 	return false;
5206 }
5207 
5208 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5209 {
5210 	return gfx_v9_0_check_rlcg_range(adev, offset,
5211 					(void *)rlcg_access_gc_9_0,
5212 					ARRAY_SIZE(rlcg_access_gc_9_0));
5213 }
5214 
5215 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5216 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5217 	.set_safe_mode = gfx_v9_0_set_safe_mode,
5218 	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
5219 	.init = gfx_v9_0_rlc_init,
5220 	.get_csb_size = gfx_v9_0_get_csb_size,
5221 	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
5222 	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5223 	.resume = gfx_v9_0_rlc_resume,
5224 	.stop = gfx_v9_0_rlc_stop,
5225 	.reset = gfx_v9_0_rlc_reset,
5226 	.start = gfx_v9_0_rlc_start,
5227 	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
5228 	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5229 };
5230 
5231 static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5232 					  enum amd_powergating_state state)
5233 {
5234 	struct amdgpu_device *adev = ip_block->adev;
5235 	bool enable = (state == AMD_PG_STATE_GATE);
5236 
5237 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5238 	case IP_VERSION(9, 2, 2):
5239 	case IP_VERSION(9, 1, 0):
5240 	case IP_VERSION(9, 3, 0):
5241 		if (!enable)
5242 			amdgpu_gfx_off_ctrl_immediate(adev, false);
5243 
5244 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5245 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5246 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5247 		} else {
5248 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5249 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5250 		}
5251 
5252 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5253 			gfx_v9_0_enable_cp_power_gating(adev, true);
5254 		else
5255 			gfx_v9_0_enable_cp_power_gating(adev, false);
5256 
5257 		/* update gfx cgpg state */
5258 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5259 
5260 		/* update mgcg state */
5261 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5262 
5263 		if (enable)
5264 			amdgpu_gfx_off_ctrl_immediate(adev, true);
5265 		break;
5266 	case IP_VERSION(9, 2, 1):
5267 		amdgpu_gfx_off_ctrl_immediate(adev, enable);
5268 		break;
5269 	default:
5270 		break;
5271 	}
5272 
5273 	return 0;
5274 }
5275 
5276 static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5277 					  enum amd_clockgating_state state)
5278 {
5279 	struct amdgpu_device *adev = ip_block->adev;
5280 
5281 	if (amdgpu_sriov_vf(adev))
5282 		return 0;
5283 
5284 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5285 	case IP_VERSION(9, 0, 1):
5286 	case IP_VERSION(9, 2, 1):
5287 	case IP_VERSION(9, 4, 0):
5288 	case IP_VERSION(9, 2, 2):
5289 	case IP_VERSION(9, 1, 0):
5290 	case IP_VERSION(9, 4, 1):
5291 	case IP_VERSION(9, 3, 0):
5292 	case IP_VERSION(9, 4, 2):
5293 		gfx_v9_0_update_gfx_clock_gating(adev,
5294 						 state == AMD_CG_STATE_GATE);
5295 		break;
5296 	default:
5297 		break;
5298 	}
5299 	return 0;
5300 }
5301 
5302 static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5303 {
5304 	struct amdgpu_device *adev = ip_block->adev;
5305 	int data;
5306 
5307 	if (amdgpu_sriov_vf(adev))
5308 		*flags = 0;
5309 
5310 	/* AMD_CG_SUPPORT_GFX_MGCG */
5311 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5312 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5313 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5314 
5315 	/* AMD_CG_SUPPORT_GFX_CGCG */
5316 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5317 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5318 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5319 
5320 	/* AMD_CG_SUPPORT_GFX_CGLS */
5321 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5322 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5323 
5324 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5325 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5326 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5327 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5328 
5329 	/* AMD_CG_SUPPORT_GFX_CP_LS */
5330 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5331 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5332 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5333 
5334 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {
5335 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5336 		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5337 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5338 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5339 
5340 		/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5341 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5342 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5343 	}
5344 }
5345 
5346 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5347 {
	return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr */
5349 }
5350 
5351 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5352 {
5353 	struct amdgpu_device *adev = ring->adev;
5354 	u64 wptr;
5355 
5356 	/* XXX check if swapping is necessary on BE */
5357 	if (ring->use_doorbell) {
5358 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5359 	} else {
5360 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5361 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5362 	}
5363 
5364 	return wptr;
5365 }
5366 
5367 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5368 {
5369 	struct amdgpu_device *adev = ring->adev;
5370 
5371 	if (ring->use_doorbell) {
5372 		/* XXX check if swapping is necessary on BE */
5373 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5374 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5375 	} else {
5376 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5377 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5378 	}
5379 }
5380 
5381 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5382 {
5383 	struct amdgpu_device *adev = ring->adev;
5384 	u32 ref_and_mask, reg_mem_engine;
5385 
5386 	if (!adev->gfx.funcs->get_hdp_flush_mask) {
5387 		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
5388 		return;
5389 	}
5390 
5391 	adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
5392 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5393 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5394 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5395 			      ref_and_mask, ref_and_mask, 0x20);
5396 }
5397 
5398 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5399 					struct amdgpu_job *job,
5400 					struct amdgpu_ib *ib,
5401 					uint32_t flags)
5402 {
5403 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5404 	u32 header, control = 0;
5405 
5406 	if (ib->flags & AMDGPU_IB_FLAG_CE)
5407 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5408 	else
5409 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5410 
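	/* control dword: IB size in dwords (low bits), VMID in bits [27:24] */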
5411 	control |= ib->length_dw | (vmid << 24);
5412 
5413 	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
5414 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5415 
5416 		if (flags & AMDGPU_IB_PREEMPTED)
5417 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5418 
5419 		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5420 			gfx_v9_0_ring_emit_de_meta(ring,
5421 						   (!amdgpu_sriov_vf(ring->adev) &&
5422 						   flags & AMDGPU_IB_PREEMPTED) ?
5423 						   true : false,
5424 						   job->gds_size > 0 && job->gds_base != 0);
5425 	}
5426 
5427 	amdgpu_ring_write(ring, header);
5428 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5429 	amdgpu_ring_write(ring,
5430 #ifdef __BIG_ENDIAN
5431 		(2 << 0) |
5432 #endif
5433 		lower_32_bits(ib->gpu_addr));
5434 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5435 	amdgpu_ring_ib_on_emit_cntl(ring);
5436 	amdgpu_ring_write(ring, control);
5437 }
5438 
5439 static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
5440 				     unsigned offset)
5441 {
5442 	u32 control = ring->ring[offset];
5443 
5444 	control |= INDIRECT_BUFFER_PRE_RESUME(1);
5445 	ring->ring[offset] = control;
5446 }
5447 
5448 static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
5449 					unsigned offset)
5450 {
5451 	struct amdgpu_device *adev = ring->adev;
5452 	void *ce_payload_cpu_addr;
5453 	uint64_t payload_offset, payload_size;
5454 
5455 	payload_size = sizeof(struct v9_ce_ib_state);
5456 
5457 	payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5458 	ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5459 
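	/* The CE payload may wrap past the end of the ring buffer; if so,
	 * copy it in two chunks.
	 */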
5460 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5461 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
5462 	} else {
5463 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
5464 		       (ring->buf_mask + 1 - offset) << 2);
5465 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5466 		memcpy((void *)&ring->ring[0],
5467 		       ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5468 		       payload_size);
5469 	}
5470 }
5471 
5472 static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
5473 					unsigned offset)
5474 {
5475 	struct amdgpu_device *adev = ring->adev;
5476 	void *de_payload_cpu_addr;
5477 	uint64_t payload_offset, payload_size;
5478 
5479 	payload_size = sizeof(struct v9_de_ib_state);
5480 
5481 	payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
5482 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5483 
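	/* Mark the IB as preempted in the DE metadata before copying it
	 * back into the ring.
	 */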
5484 	((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
5485 		IB_COMPLETION_STATUS_PREEMPTED;
5486 
5487 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5488 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
5489 	} else {
5490 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
5491 		       (ring->buf_mask + 1 - offset) << 2);
5492 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5493 		memcpy((void *)&ring->ring[0],
5494 		       de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5495 		       payload_size);
5496 	}
5497 }
5498 
5499 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5500 					  struct amdgpu_job *job,
5501 					  struct amdgpu_ib *ib,
5502 					  uint32_t flags)
5503 {
5504 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5505 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5506 
	/* Currently there is a high likelihood of a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
5511 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5512 	 * Those are probably bugs somewhere else in the kernel driver.
5513 	 *
5514 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5515 	 * GDS to 0 for this ring (me/pipe).
5516 	 */
5517 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5518 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5519 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5520 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5521 	}
5522 
5523 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5524 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5525 	amdgpu_ring_write(ring,
5526 #ifdef __BIG_ENDIAN
5527 				(2 << 0) |
5528 #endif
5529 				lower_32_bits(ib->gpu_addr));
5530 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5531 	amdgpu_ring_write(ring, control);
5532 }
5533 
5534 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5535 				     u64 seq, unsigned flags)
5536 {
5537 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5538 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5539 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5540 	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
5541 	uint32_t dw2 = 0;
5542 
5543 	/* RELEASE_MEM - flush caches, send int */
5544 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5545 
5546 	if (writeback) {
5547 		dw2 = EOP_TC_NC_ACTION_EN;
5548 	} else {
5549 		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
5550 				EOP_TC_MD_ACTION_EN;
5551 	}
5552 	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5553 				EVENT_INDEX(5);
5554 	if (exec)
5555 		dw2 |= EOP_EXEC;
5556 
5557 	amdgpu_ring_write(ring, dw2);
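	/* DATA_SEL 2 = send the 64-bit seq, 1 = low 32 bits only;
	 * INT_SEL 2 = raise the interrupt once the write confirms.
	 */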
5558 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5559 
	/*
	 * The address must be Qword aligned for a 64-bit write, or Dword
	 * aligned when only the low 32 bits of data are sent (data high
	 * discarded).
	 */
5564 	if (write64bit)
5565 		BUG_ON(addr & 0x7);
5566 	else
5567 		BUG_ON(addr & 0x3);
5568 	amdgpu_ring_write(ring, lower_32_bits(addr));
5569 	amdgpu_ring_write(ring, upper_32_bits(addr));
5570 	amdgpu_ring_write(ring, lower_32_bits(seq));
5571 	amdgpu_ring_write(ring, upper_32_bits(seq));
5572 	amdgpu_ring_write(ring, 0);
5573 }
5574 
5575 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5576 {
5577 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5578 	uint32_t seq = ring->fence_drv.sync_seq;
5579 	uint64_t addr = ring->fence_drv.gpu_addr;
5580 
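	/* WAIT_REG_MEM on the fence address until the last emitted sync
	 * seq appears; usepfp selects the PFP engine on gfx rings, ME on
	 * compute.
	 */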
5581 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5582 			      lower_32_bits(addr), upper_32_bits(addr),
5583 			      seq, 0xffffffff, 4);
5584 }
5585 
5586 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5587 					unsigned vmid, uint64_t pd_addr)
5588 {
5589 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5590 
5591 	/* compute doesn't have PFP */
5592 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5593 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5594 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5595 		amdgpu_ring_write(ring, 0x0);
5596 	}
5597 }
5598 
5599 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5600 {
5601 	return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
5602 }
5603 
5604 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5605 {
5606 	u64 wptr;
5607 
5608 	/* XXX check if swapping is necessary on BE */
5609 	if (ring->use_doorbell)
5610 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5611 	else
5612 		BUG();
5613 	return wptr;
5614 }
5615 
5616 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5617 {
5618 	struct amdgpu_device *adev = ring->adev;
5619 
5620 	/* XXX check if swapping is necessary on BE */
5621 	if (ring->use_doorbell) {
5622 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5623 		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
5625 		BUG(); /* only DOORBELL method supported on gfx9 now */
5626 	}
5627 }
5628 
5629 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5630 					 u64 seq, unsigned int flags)
5631 {
5632 	struct amdgpu_device *adev = ring->adev;
5633 
5634 	/* we only allocate 32bit for each seq wb address */
5635 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5636 
5637 	/* write fence seq to the "addr" */
5638 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5639 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5640 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5641 	amdgpu_ring_write(ring, lower_32_bits(addr));
5642 	amdgpu_ring_write(ring, upper_32_bits(addr));
5643 	amdgpu_ring_write(ring, lower_32_bits(seq));
5644 
5645 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5646 		/* set register to trigger INT */
5647 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5648 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5649 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5650 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5651 		amdgpu_ring_write(ring, 0);
5652 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5653 	}
5654 }
5655 
5656 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5657 {
5658 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5659 	amdgpu_ring_write(ring, 0);
5660 }
5661 
5662 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
5663 {
5664 	struct amdgpu_device *adev = ring->adev;
5665 	struct v9_ce_ib_state ce_payload = {0};
5666 	uint64_t offset, ce_payload_gpu_addr;
5667 	void *ce_payload_cpu_addr;
5668 	int cnt;
5669 
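	/* PACKET3 count: 3 body dwords (control, addr lo/hi) plus the
	 * payload dwords, minus one per the count-field convention.
	 */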
5670 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5671 
5672 	offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5673 	ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5674 	ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5675 
5676 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5677 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5678 				 WRITE_DATA_DST_SEL(8) |
5679 				 WR_CONFIRM) |
5680 				 WRITE_DATA_CACHE_POLICY(0));
5681 	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
5682 	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
5683 
5684 	amdgpu_ring_ib_on_emit_ce(ring);
5685 
5686 	if (resume)
5687 		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
5688 					   sizeof(ce_payload) >> 2);
5689 	else
5690 		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
5691 					   sizeof(ce_payload) >> 2);
5692 }
5693 
5694 static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
5695 {
5696 	int i, r = 0;
5697 	struct amdgpu_device *adev = ring->adev;
5698 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5699 	struct amdgpu_ring *kiq_ring = &kiq->ring;
5700 	unsigned long flags;
5701 
5702 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5703 		return -EINVAL;
5704 
5705 	spin_lock_irqsave(&kiq->ring_lock, flags);
5706 
5707 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5708 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
5709 		return -ENOMEM;
5710 	}
5711 
5712 	/* assert preemption condition */
5713 	amdgpu_ring_set_preempt_cond_exec(ring, false);
5714 
5715 	ring->trail_seq += 1;
5716 	amdgpu_ring_alloc(ring, 13);
5717 	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
5718 				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
5719 
5720 	/* assert IB preemption, emit the trailing fence */
5721 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5722 				   ring->trail_fence_gpu_addr,
5723 				   ring->trail_seq);
5724 
5725 	amdgpu_ring_commit(kiq_ring);
5726 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
5727 
5728 	/* poll the trailing fence */
5729 	for (i = 0; i < adev->usec_timeout; i++) {
5730 		if (ring->trail_seq ==
5731 			le32_to_cpu(*ring->trail_fence_cpu_addr))
5732 			break;
5733 		udelay(1);
5734 	}
5735 
5736 	if (i >= adev->usec_timeout) {
5737 		r = -EINVAL;
5738 		drm_warn(adev_to_drm(adev), "ring %d: timed out preempting IB\n", ring->idx);
5739 	}
5740 
5741 	/* reset CP_VMID_PREEMPT after the trailing fence */
5742 	amdgpu_ring_emit_wreg(ring,
5743 			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
5744 			      0x0);
5745 	amdgpu_ring_commit(ring);
5746 
5747 	/* deassert preemption condition */
5748 	amdgpu_ring_set_preempt_cond_exec(ring, true);
5749 	return r;
5750 }
5751 
5752 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
5753 {
5754 	struct amdgpu_device *adev = ring->adev;
5755 	struct v9_de_ib_state de_payload = {0};
5756 	uint64_t offset, gds_addr, de_payload_gpu_addr;
5757 	void *de_payload_cpu_addr;
5758 	int cnt;
5759 
5760 	offset = offsetof(struct v9_gfx_meta_data, de_payload);
5761 	de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5762 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5763 
5764 	gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5765 			 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5766 			 PAGE_SIZE);
5767 
5768 	if (usegds) {
5769 		de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5770 		de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5771 	}
5772 
5773 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5774 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5775 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5776 				 WRITE_DATA_DST_SEL(8) |
5777 				 WR_CONFIRM) |
5778 				 WRITE_DATA_CACHE_POLICY(0));
5779 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5780 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5781 
5782 	amdgpu_ring_ib_on_emit_de(ring);
5783 	if (resume)
5784 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5785 					   sizeof(de_payload) >> 2);
5786 	else
5787 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5788 					   sizeof(de_payload) >> 2);
5789 }
5790 
5791 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5792 				   bool secure)
5793 {
5794 	uint32_t v = secure ? FRAME_TMZ : 0;
5795 
5796 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5797 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5798 }
5799 
5800 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5801 {
5802 	uint32_t dw2 = 0;
5803 
5804 	gfx_v9_0_ring_emit_ce_meta(ring,
5805 				   !amdgpu_sriov_vf(ring->adev) &&
5806 				   (flags & AMDGPU_IB_PREEMPTED));
5807 
5808 	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5809 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5810 		/* set load_global_config & load_global_uconfig */
5811 		dw2 |= 0x8001;
5812 		/* set load_cs_sh_regs */
5813 		dw2 |= 0x01000000;
5814 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5815 		dw2 |= 0x10002;
5816 
5817 		/* set load_ce_ram if a preamble is present */
5818 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5819 			dw2 |= 0x10000000;
5820 	} else {
5821 		/* still load_ce_ram the first time a preamble is present,
5822 		 * even though no context switch happens.
5823 		 */
5824 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5825 			dw2 |= 0x10000000;
5826 	}
5827 
5828 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5829 	amdgpu_ring_write(ring, dw2);
5830 	amdgpu_ring_write(ring, 0);
5831 }
5832 
5833 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
5834 						  uint64_t addr)
5835 {
5836 	unsigned ret;
5837 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5838 	amdgpu_ring_write(ring, lower_32_bits(addr));
5839 	amdgpu_ring_write(ring, upper_32_bits(addr));
5840 	/* discard following DWs if *cond_exec_gpu_addr==0 */
5841 	amdgpu_ring_write(ring, 0);
5842 	ret = ring->wptr & ring->buf_mask;
5843 	/* patch dummy value later */
5844 	amdgpu_ring_write(ring, 0);
5845 	return ret;
5846 }
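
/*
 * The offset returned above indexes the placeholder dword inside the ring
 * buffer; once the IB has been written, the common ring code patches that
 * dword with the number of dwords the COND_EXEC packet should skip when
 * the value at @addr reads back as zero.
 */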
5847 
5848 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5849 				    uint32_t reg_val_offs)
5850 {
5851 	struct amdgpu_device *adev = ring->adev;
5852 
5853 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5854 	amdgpu_ring_write(ring, 0 |	/* src: register */
5855 				(5 << 8) |	/* dst: memory */
5856 				(1 << 20));	/* write confirm */
5857 	amdgpu_ring_write(ring, reg);
5858 	amdgpu_ring_write(ring, 0);
5859 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5860 				reg_val_offs * 4));
5861 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5862 				reg_val_offs * 4));
5863 }
5864 
5865 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5866 				    uint32_t val)
5867 {
5868 	uint32_t cmd = 0;
5869 
5870 	switch (ring->funcs->type) {
5871 	case AMDGPU_RING_TYPE_GFX:
5872 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5873 		break;
5874 	case AMDGPU_RING_TYPE_KIQ:
5875 		cmd = (1 << 16); /* no inc addr */
5876 		break;
5877 	default:
5878 		cmd = WR_CONFIRM;
5879 		break;
5880 	}
5881 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5882 	amdgpu_ring_write(ring, cmd);
5883 	amdgpu_ring_write(ring, reg);
5884 	amdgpu_ring_write(ring, 0);
5885 	amdgpu_ring_write(ring, val);
5886 }
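
/*
 * Illustrative sketch (not a call site taken from this file): higher-level
 * code reaches this helper through the amdgpu_ring_emit_wreg() dispatch,
 * e.g. the wave-limit code later in this file throttles the gfx pipe with
 *
 *	amdgpu_ring_emit_wreg(ring,
 *			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
 *			      0x1f);
 */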
5887 
5888 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5889 					uint32_t val, uint32_t mask)
5890 {
5891 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5892 }
5893 
5894 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5895 						  uint32_t reg0, uint32_t reg1,
5896 						  uint32_t ref, uint32_t mask)
5897 {
5898 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5899 	struct amdgpu_device *adev = ring->adev;
5900 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5901 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5902 
5903 	if (fw_version_ok)
5904 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5905 				      ref, mask, 0x20);
5906 	else
5907 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5908 							   ref, mask);
5909 }
5910 
5911 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5912 {
5913 	struct amdgpu_device *adev = ring->adev;
5914 	uint32_t value = 0;
5915 
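	/*
	 * Soft recovery tries to unblock the ring by killing the shader waves
	 * of the hung job rather than resetting the GPU: CHECK_VMID + VM_ID
	 * restrict the SQ command to waves owned by the guilty vmid, and the
	 * CMD/MODE values request the kill operation (encodings per the
	 * SQ_CMD register definition). RLC safe mode brackets the write.
	 */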
5916 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5917 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5918 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5919 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5920 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5921 	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5922 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5923 }
5924 
5925 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5926 						 enum amdgpu_interrupt_state state)
5927 {
5928 	switch (state) {
5929 	case AMDGPU_IRQ_STATE_DISABLE:
5930 	case AMDGPU_IRQ_STATE_ENABLE:
5931 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5932 			       TIME_STAMP_INT_ENABLE,
5933 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5934 		break;
5935 	default:
5936 		break;
5937 	}
5938 }
5939 
5940 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5941 						     int me, int pipe,
5942 						     enum amdgpu_interrupt_state state)
5943 {
5944 	u32 mec_int_cntl, mec_int_cntl_reg;
5945 
5946 	/*
5947 	 * amdgpu controls only the first MEC. That's why this function only
5948 	 * handles the setting of interrupts for this specific MEC. All other
5949 	 * pipes' interrupts are set by amdkfd.
5950 	 */
5951 
5952 	if (me == 1) {
5953 		switch (pipe) {
5954 		case 0:
5955 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5956 			break;
5957 		case 1:
5958 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5959 			break;
5960 		case 2:
5961 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5962 			break;
5963 		case 3:
5964 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5965 			break;
5966 		default:
5967 			DRM_DEBUG("invalid pipe %d\n", pipe);
5968 			return;
5969 		}
5970 	} else {
5971 		DRM_DEBUG("invalid me %d\n", me);
5972 		return;
5973 	}
5974 
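	/*
	 * All CP_ME1_PIPE*_INT_CNTL registers share one field layout, so the
	 * PIPE0 field definitions below are valid for whichever pipe's
	 * register was selected above.
	 */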
5975 	switch (state) {
5976 	case AMDGPU_IRQ_STATE_DISABLE:
5977 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5978 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5979 					     TIME_STAMP_INT_ENABLE, 0);
5980 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5981 		break;
5982 	case AMDGPU_IRQ_STATE_ENABLE:
5983 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5984 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5985 					     TIME_STAMP_INT_ENABLE, 1);
5986 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5987 		break;
5988 	default:
5989 		break;
5990 	}
5991 }
5992 
5993 static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
5994 				     int me, int pipe)
5995 {
5996 	/*
5997 	 * amdgpu controls only the first MEC. That's why this function only
5998 	 * handles the setting of interrupts for this specific MEC. All other
5999 	 * pipes' interrupts are set by amdkfd.
6000 	 */
6001 	if (me != 1)
6002 		return 0;
6003 
6004 	switch (pipe) {
6005 	case 0:
6006 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
6007 	case 1:
6008 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
6009 	case 2:
6010 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
6011 	case 3:
6012 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
6013 	default:
6014 		return 0;
6015 	}
6016 }
6017 
6018 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6019 					     struct amdgpu_irq_src *source,
6020 					     unsigned type,
6021 					     enum amdgpu_interrupt_state state)
6022 {
6023 	u32 cp_int_cntl_reg, cp_int_cntl;
6024 	int i, j;
6025 
6026 	switch (state) {
6027 	case AMDGPU_IRQ_STATE_DISABLE:
6028 	case AMDGPU_IRQ_STATE_ENABLE:
6029 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6030 			       PRIV_REG_INT_ENABLE,
6031 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6032 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6033 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6034 				/* MECs start at 1 */
6035 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6036 
6037 				if (cp_int_cntl_reg) {
6038 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6039 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6040 								    PRIV_REG_INT_ENABLE,
6041 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6042 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6043 				}
6044 			}
6045 		}
6046 		break;
6047 	default:
6048 		break;
6049 	}
6050 
6051 	return 0;
6052 }
6053 
6054 static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6055 					   struct amdgpu_irq_src *source,
6056 					   unsigned type,
6057 					   enum amdgpu_interrupt_state state)
6058 {
6059 	u32 cp_int_cntl_reg, cp_int_cntl;
6060 	int i, j;
6061 
6062 	switch (state) {
6063 	case AMDGPU_IRQ_STATE_DISABLE:
6064 	case AMDGPU_IRQ_STATE_ENABLE:
6065 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6066 			       OPCODE_ERROR_INT_ENABLE,
6067 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6068 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6069 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6070 				/* MECs start at 1 */
6071 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6072 
6073 				if (cp_int_cntl_reg) {
6074 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6075 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6076 								    OPCODE_ERROR_INT_ENABLE,
6077 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6078 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6079 				}
6080 			}
6081 		}
6082 		break;
6083 	default:
6084 		break;
6085 	}
6086 
6087 	return 0;
6088 }
6089 
6090 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6091 					      struct amdgpu_irq_src *source,
6092 					      unsigned type,
6093 					      enum amdgpu_interrupt_state state)
6094 {
6095 	switch (state) {
6096 	case AMDGPU_IRQ_STATE_DISABLE:
6097 	case AMDGPU_IRQ_STATE_ENABLE:
6098 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6099 			       PRIV_INSTR_INT_ENABLE,
6100 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6101 		break;
6102 	default:
6103 		break;
6104 	}
6105 
6106 	return 0;
6107 }
6108 
6109 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
6110 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6111 			CP_ECC_ERROR_INT_ENABLE, 1)
6112 
6113 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
6114 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6115 			CP_ECC_ERROR_INT_ENABLE, 0)
6116 
6117 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
6118 					      struct amdgpu_irq_src *source,
6119 					      unsigned type,
6120 					      enum amdgpu_interrupt_state state)
6121 {
6122 	switch (state) {
6123 	case AMDGPU_IRQ_STATE_DISABLE:
6124 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6125 				CP_ECC_ERROR_INT_ENABLE, 0);
6126 		DISABLE_ECC_ON_ME_PIPE(1, 0);
6127 		DISABLE_ECC_ON_ME_PIPE(1, 1);
6128 		DISABLE_ECC_ON_ME_PIPE(1, 2);
6129 		DISABLE_ECC_ON_ME_PIPE(1, 3);
6130 		break;
6131 
6132 	case AMDGPU_IRQ_STATE_ENABLE:
6133 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6134 				CP_ECC_ERROR_INT_ENABLE, 1);
6135 		ENABLE_ECC_ON_ME_PIPE(1, 0);
6136 		ENABLE_ECC_ON_ME_PIPE(1, 1);
6137 		ENABLE_ECC_ON_ME_PIPE(1, 2);
6138 		ENABLE_ECC_ON_ME_PIPE(1, 3);
6139 		break;
6140 	default:
6141 		break;
6142 	}
6143 
6144 	return 0;
6145 }
6146 
6147 
6148 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6149 					    struct amdgpu_irq_src *src,
6150 					    unsigned type,
6151 					    enum amdgpu_interrupt_state state)
6152 {
6153 	switch (type) {
6154 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6155 		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
6156 		break;
6157 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6158 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6159 		break;
6160 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6161 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6162 		break;
6163 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6164 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6165 		break;
6166 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6167 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6168 		break;
6169 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6170 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6171 		break;
6172 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6173 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6174 		break;
6175 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6176 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6177 		break;
6178 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6179 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6180 		break;
6181 	default:
6182 		break;
6183 	}
6184 	return 0;
6185 }
6186 
6187 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
6188 			    struct amdgpu_irq_src *source,
6189 			    struct amdgpu_iv_entry *entry)
6190 {
6191 	int i;
6192 	u8 me_id, pipe_id, queue_id;
6193 	struct amdgpu_ring *ring;
6194 
6195 	DRM_DEBUG("IH: CP EOP\n");
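	/* entry->ring_id packs the source: bits [1:0] pipe, [3:2] me, [6:4] queue */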
6196 	me_id = (entry->ring_id & 0x0c) >> 2;
6197 	pipe_id = (entry->ring_id & 0x03) >> 0;
6198 	queue_id = (entry->ring_id & 0x70) >> 4;
6199 
6200 	switch (me_id) {
6201 	case 0:
6202 		if (adev->gfx.num_gfx_rings) {
6203 			if (!adev->gfx.mcbp) {
6204 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6205 			} else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
6206 				/* Fence signals are handled on the software rings*/
6207 				for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
6208 					amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
6209 			}
6210 		}
6211 		break;
6212 	case 1:
6213 	case 2:
6214 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6215 			ring = &adev->gfx.compute_ring[i];
6216 			/* Per-queue interrupts are supported for MEC starting from VI.
6217 			 * The interrupt can only be enabled/disabled per pipe, not per queue.
6218 			 */
6219 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6220 				amdgpu_fence_process(ring);
6221 		}
6222 		break;
6223 	}
6224 	return 0;
6225 }
6226 
6227 static void gfx_v9_0_fault(struct amdgpu_device *adev,
6228 			   struct amdgpu_iv_entry *entry)
6229 {
6230 	u8 me_id, pipe_id, queue_id;
6231 	struct amdgpu_ring *ring;
6232 	int i;
6233 
6234 	me_id = (entry->ring_id & 0x0c) >> 2;
6235 	pipe_id = (entry->ring_id & 0x03) >> 0;
6236 	queue_id = (entry->ring_id & 0x70) >> 4;
6237 
6238 	switch (me_id) {
6239 	case 0:
6240 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6241 		break;
6242 	case 1:
6243 	case 2:
6244 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6245 			ring = &adev->gfx.compute_ring[i];
6246 			if (ring->me == me_id && ring->pipe == pipe_id &&
6247 			    ring->queue == queue_id)
6248 				drm_sched_fault(&ring->sched);
6249 		}
6250 		break;
6251 	}
6252 }
6253 
6254 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
6255 				 struct amdgpu_irq_src *source,
6256 				 struct amdgpu_iv_entry *entry)
6257 {
6258 	DRM_ERROR("Illegal register access in command stream\n");
6259 	gfx_v9_0_fault(adev, entry);
6260 	return 0;
6261 }
6262 
6263 static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
6264 			       struct amdgpu_irq_src *source,
6265 			       struct amdgpu_iv_entry *entry)
6266 {
6267 	DRM_ERROR("Illegal opcode in command stream\n");
6268 	gfx_v9_0_fault(adev, entry);
6269 	return 0;
6270 }
6271 
6272 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
6273 				  struct amdgpu_irq_src *source,
6274 				  struct amdgpu_iv_entry *entry)
6275 {
6276 	DRM_ERROR("Illegal instruction in command stream\n");
6277 	gfx_v9_0_fault(adev, entry);
6278 	return 0;
6279 }
6280 
6281 
6282 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
6283 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
6284 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
6285 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
6286 	},
6287 	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
6288 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
6289 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
6290 	},
6291 	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6292 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
6293 	  0, 0
6294 	},
6295 	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6296 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
6297 	  0, 0
6298 	},
6299 	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
6300 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
6301 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
6302 	},
6303 	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6304 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
6305 	  0, 0
6306 	},
6307 	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6308 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
6309 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
6310 	},
6311 	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
6312 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
6313 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
6314 	},
6315 	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
6316 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
6317 	  0, 0
6318 	},
6319 	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
6320 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
6321 	  0, 0
6322 	},
6323 	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
6324 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
6325 	  0, 0
6326 	},
6327 	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6328 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
6329 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
6330 	},
6331 	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6332 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
6333 	  0, 0
6334 	},
6335 	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6336 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6337 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6338 	},
6339 	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6340 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6341 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6342 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6343 	},
6344 	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6345 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6346 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6347 	  0, 0
6348 	},
6349 	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6350 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6351 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6352 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6353 	},
6354 	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6355 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6356 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6357 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6358 	},
6359 	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6360 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6361 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6362 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6363 	},
6364 	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6365 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6366 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6367 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6368 	},
6369 	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6370 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6371 	  0, 0
6372 	},
6373 	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6374 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6375 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6376 	},
6377 	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6378 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6379 	  0, 0
6380 	},
6381 	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6382 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6383 	  0, 0
6384 	},
6385 	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6386 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6387 	  0, 0
6388 	},
6389 	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6390 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6391 	  0, 0
6392 	},
6393 	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6394 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6395 	  0, 0
6396 	},
6397 	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6398 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6399 	  0, 0
6400 	},
6401 	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6402 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6403 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6404 	},
6405 	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6406 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6407 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6408 	},
6409 	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6410 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6411 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6412 	},
6413 	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6414 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6415 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6416 	},
6417 	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6418 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6419 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6420 	},
6421 	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6422 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6423 	  0, 0
6424 	},
6425 	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6426 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6427 	  0, 0
6428 	},
6429 	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6430 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6431 	  0, 0
6432 	},
6433 	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6434 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6435 	  0, 0
6436 	},
6437 	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6438 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6439 	  0, 0
6440 	},
6441 	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6442 	  SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6443 	  0, 0
6444 	},
6445 	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6446 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6447 	  0, 0
6448 	},
6449 	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6450 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6451 	  0, 0
6452 	},
6453 	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6454 	  SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6455 	  0, 0
6456 	},
6457 	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6458 	  SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6459 	  0, 0
6460 	},
6461 	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6462 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6463 	  0, 0
6464 	},
6465 	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6466 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6467 	  0, 0
6468 	},
6469 	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6470 	  SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6471 	  0, 0
6472 	},
6473 	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6474 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6475 	  0, 0
6476 	},
6477 	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6478 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6479 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6480 	},
6481 	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6482 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6483 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6484 	},
6485 	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6486 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6487 	  0, 0
6488 	},
6489 	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6490 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6491 	  0, 0
6492 	},
6493 	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6494 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6495 	  0, 0
6496 	},
6497 	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6498 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6499 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6500 	},
6501 	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6502 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6503 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6504 	},
6505 	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6506 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6507 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6508 	},
6509 	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6510 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6511 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6512 	},
6513 	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6514 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6515 	  0, 0
6516 	},
6517 	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6518 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6519 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6520 	},
6521 	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6522 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6523 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6524 	},
6525 	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6526 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6527 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6528 	},
6529 	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6530 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6531 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6532 	},
6533 	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6534 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6535 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6536 	},
6537 	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6538 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6539 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6540 	},
6541 	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6542 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6543 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6544 	},
6545 	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6546 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6547 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6548 	},
6549 	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6550 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6551 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6552 	},
6553 	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6554 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6555 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6556 	},
6557 	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6558 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6559 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6560 	},
6561 	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6562 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6563 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6564 	},
6565 	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6566 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6567 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6568 	},
6569 	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6570 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6571 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6572 	},
6573 	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6574 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6575 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6576 	},
6577 	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6578 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6579 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6580 	},
6581 	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6582 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6583 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6584 	},
6585 	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6586 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6587 	  0, 0
6588 	},
6589 	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6590 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6591 	  0, 0
6592 	},
6593 	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6594 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6595 	  0, 0
6596 	},
6597 	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6598 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6599 	  0, 0
6600 	},
6601 	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6602 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6603 	  0, 0
6604 	},
6605 	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6606 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6607 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6608 	},
6609 	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6610 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6611 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6612 	},
6613 	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6614 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6615 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6616 	},
6617 	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6618 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6619 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6620 	},
6621 	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6622 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6623 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6624 	},
6625 	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6626 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6627 	  0, 0
6628 	},
6629 	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6630 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6631 	  0, 0
6632 	},
6633 	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6634 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6635 	  0, 0
6636 	},
6637 	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6638 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6639 	  0, 0
6640 	},
6641 	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6642 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6643 	  0, 0
6644 	},
6645 	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6646 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6647 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6648 	},
6649 	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6650 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6651 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6652 	},
6653 	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6654 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6655 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6656 	},
6657 	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6658 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6659 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6660 	},
6661 	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6662 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6663 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6664 	},
6665 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6666 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6667 	  0, 0
6668 	},
6669 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6670 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6671 	  0, 0
6672 	},
6673 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6674 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6675 	  0, 0
6676 	},
6677 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6678 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6679 	  0, 0
6680 	},
6681 	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6682 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6683 	  0, 0
6684 	},
6685 	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6686 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6687 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6688 	},
6689 	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6690 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6691 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6692 	},
6693 	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6694 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6695 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6696 	},
6697 	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6698 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6699 	  0, 0
6700 	},
6701 	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6702 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6703 	  0, 0
6704 	},
6705 	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6706 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6707 	  0, 0
6708 	},
6709 	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6710 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6711 	  0, 0
6712 	},
6713 	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6714 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6715 	  0, 0
6716 	},
6717 	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6718 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6719 	  0, 0
6720 	}
6721 };
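
/*
 * Each entry above pairs an EDC counter register with the SEC (single-error
 * corrected) and DED (double-error detected) mask/shift fields that
 * gfx_v9_0_ras_error_count() uses to extract per-subblock error counts;
 * entries whose DED mask is 0 only ever report correctable errors.
 */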
6722 
6723 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6724 				     void *inject_if, uint32_t instance_mask)
6725 {
6726 	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6727 	int ret;
6728 	struct ta_ras_trigger_error_input block_info = { 0 };
6729 
6730 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6731 		return -EINVAL;
6732 
6733 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6734 		return -EINVAL;
6735 
6736 	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6737 		return -EPERM;
6738 
6739 	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6740 	      info->head.type)) {
6741 		DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6742 			ras_gfx_subblocks[info->head.sub_block_index].name,
6743 			info->head.type);
6744 		return -EPERM;
6745 	}
6746 
6747 	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6748 	      info->head.type)) {
6749 		DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6750 			ras_gfx_subblocks[info->head.sub_block_index].name,
6751 			info->head.type);
6752 		return -EPERM;
6753 	}
6754 
6755 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6756 	block_info.sub_block_index =
6757 		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6758 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6759 	block_info.address = info->address;
6760 	block_info.value = info->value;
6761 
6762 	mutex_lock(&adev->grbm_idx_mutex);
6763 	ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
6764 	mutex_unlock(&adev->grbm_idx_mutex);
6765 
6766 	return ret;
6767 }
6768 
6769 static const char * const vml2_mems[] = {
6770 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6771 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6772 	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
6773 	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
6774 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6775 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6776 	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
6777 	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
6778 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6779 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6780 	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
6781 	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
6782 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6783 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6784 	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
6785 	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
6786 };
6787 
6788 static const char * const vml2_walker_mems[] = {
6789 	"UTC_VML2_CACHE_PDE0_MEM0",
6790 	"UTC_VML2_CACHE_PDE0_MEM1",
6791 	"UTC_VML2_CACHE_PDE1_MEM0",
6792 	"UTC_VML2_CACHE_PDE1_MEM1",
6793 	"UTC_VML2_CACHE_PDE2_MEM0",
6794 	"UTC_VML2_CACHE_PDE2_MEM1",
6795 	"UTC_VML2_RDIF_LOG_FIFO",
6796 };
6797 
6798 static const char * const atc_l2_cache_2m_mems[] = {
6799 	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6800 	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6801 	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6802 	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6803 };
6804 
6805 static const char * const atc_l2_cache_4k_mems[] = {
6806 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6807 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6808 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6809 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6810 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6811 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6812 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6813 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6814 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6815 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6816 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6817 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6818 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6819 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6820 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6821 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6822 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6823 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6824 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6825 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6826 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6827 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6828 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6829 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6830 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6831 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6832 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6833 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6834 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6835 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6836 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6837 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6838 };
6839 
6840 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6841 					 struct ras_err_data *err_data)
6842 {
6843 	uint32_t i, data;
6844 	uint32_t sec_count, ded_count;
6845 
6846 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6847 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6848 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6849 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6850 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6851 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6852 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6853 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6854 
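	/*
	 * The *_INDEX writes above select entry 255 and the *_CNT writes clear
	 * the counters, so the loops below start from zeroed counts; the
	 * index-255 writes are repeated at the end of the function once the
	 * scan is done.
	 */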
6855 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6856 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6857 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6858 
6859 		sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6860 		if (sec_count) {
6861 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6862 				"SEC %d\n", i, vml2_mems[i], sec_count);
6863 			err_data->ce_count += sec_count;
6864 		}
6865 
6866 		ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6867 		if (ded_count) {
6868 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6869 				"DED %d\n", i, vml2_mems[i], ded_count);
6870 			err_data->ue_count += ded_count;
6871 		}
6872 	}
6873 
6874 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6875 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6876 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6877 
6878 		sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6879 						SEC_COUNT);
6880 		if (sec_count) {
6881 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6882 				"SEC %d\n", i, vml2_walker_mems[i], sec_count);
6883 			err_data->ce_count += sec_count;
6884 		}
6885 
6886 		ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6887 						DED_COUNT);
6888 		if (ded_count) {
6889 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6890 				"DED %d\n", i, vml2_walker_mems[i], ded_count);
6891 			err_data->ue_count += ded_count;
6892 		}
6893 	}
6894 
6895 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6896 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6897 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6898 
6899 		sec_count = (data & 0x00006000L) >> 0xd; /* SEC_COUNT: bits [14:13] */
6900 		if (sec_count) {
6901 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6902 				"SEC %d\n", i, atc_l2_cache_2m_mems[i],
6903 				sec_count);
6904 			err_data->ce_count += sec_count;
6905 		}
6906 	}
6907 
6908 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6909 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6910 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6911 
6912 		sec_count = (data & 0x00006000L) >> 0xd; /* SEC_COUNT: bits [14:13] */
6913 		if (sec_count) {
6914 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6915 				"SEC %d\n", i, atc_l2_cache_4k_mems[i],
6916 				sec_count);
6917 			err_data->ce_count += sec_count;
6918 		}
6919 
6920 		ded_count = (data & 0x00018000L) >> 0xf; /* DED_COUNT: bits [16:15] */
6921 		if (ded_count) {
6922 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6923 				"DED %d\n", i, atc_l2_cache_4k_mems[i],
6924 				ded_count);
6925 			err_data->ue_count += ded_count;
6926 		}
6927 	}
6928 
6929 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6930 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6931 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6932 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6933 
6934 	return 0;
6935 }
6936 
6937 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6938 	const struct soc15_reg_entry *reg,
6939 	uint32_t se_id, uint32_t inst_id, uint32_t value,
6940 	uint32_t *sec_count, uint32_t *ded_count)
6941 {
6942 	uint32_t i;
6943 	uint32_t sec_cnt, ded_cnt;
6944 
6945 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6946 		if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6947 			gfx_v9_0_ras_fields[i].seg != reg->seg ||
6948 			gfx_v9_0_ras_fields[i].inst != reg->inst)
6949 			continue;
6950 
6951 		sec_cnt = (value &
6952 				gfx_v9_0_ras_fields[i].sec_count_mask) >>
6953 				gfx_v9_0_ras_fields[i].sec_count_shift;
6954 		if (sec_cnt) {
6955 			dev_info(adev->dev, "GFX SubBlock %s, "
6956 				"Instance[%d][%d], SEC %d\n",
6957 				gfx_v9_0_ras_fields[i].name,
6958 				se_id, inst_id,
6959 				sec_cnt);
6960 			*sec_count += sec_cnt;
6961 		}
6962 
6963 		ded_cnt = (value &
6964 				gfx_v9_0_ras_fields[i].ded_count_mask) >>
6965 				gfx_v9_0_ras_fields[i].ded_count_shift;
6966 		if (ded_cnt) {
6967 			dev_info(adev->dev, "GFX SubBlock %s, "
6968 				"Instance[%d][%d], DED %d\n",
6969 				gfx_v9_0_ras_fields[i].name,
6970 				se_id, inst_id,
6971 				ded_cnt);
6972 			*ded_count += ded_cnt;
6973 		}
6974 	}
6975 
6976 	return 0;
6977 }
6978 
6979 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6980 {
6981 	int i, j, k;
6982 
6983 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6984 		return;
6985 
6986 	/* read back registers to clear the counters */
6987 	mutex_lock(&adev->grbm_idx_mutex);
6988 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6989 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6990 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6991 				amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
6992 				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6993 			}
6994 		}
6995 	}
6996 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6997 	mutex_unlock(&adev->grbm_idx_mutex);
6998 
6999 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7000 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
7001 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7002 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
7003 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7004 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
7005 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7006 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
7007 
7008 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
7009 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
7010 		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
7011 	}
7012 
7013 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
7014 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
7015 		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
7016 	}
7017 
7018 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
7019 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
7020 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
7021 	}
7022 
7023 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
7024 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
7025 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
7026 	}
7027 
7028 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7029 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7030 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7031 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7032 }
7033 
7034 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
7035 					  void *ras_error_status)
7036 {
7037 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
7038 	uint32_t sec_count = 0, ded_count = 0;
7039 	uint32_t i, j, k;
7040 	uint32_t reg_value;
7041 
7042 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7043 		return;
7044 
7045 	err_data->ue_count = 0;
7046 	err_data->ce_count = 0;
7047 
7048 	mutex_lock(&adev->grbm_idx_mutex);
7049 
7050 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7051 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7052 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7053 				amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
7054 				reg_value =
7055 					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7056 				if (reg_value)
7057 					gfx_v9_0_ras_error_count(adev,
7058 						&gfx_v9_0_edc_counter_regs[i],
7059 						j, k, reg_value,
7060 						&sec_count, &ded_count);
7061 			}
7062 		}
7063 	}
7064 
7065 	err_data->ce_count += sec_count;
7066 	err_data->ue_count += ded_count;
7067 
7068 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7069 	mutex_unlock(&adev->grbm_idx_mutex);
7070 
7071 	gfx_v9_0_query_utc_edc_status(adev, err_data);
7072 }
7073 
7074 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
7075 {
7076 	const unsigned int cp_coher_cntl =
7077 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
7078 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
7079 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
7080 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
7081 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
7082 
7083 	/* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
7084 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
7085 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
7086 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
7087 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
7088 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
7089 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
7090 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
7091 }
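
/*
 * With CP_COHER_BASE = 0 and an all-ones CP_COHER_SIZE, the ACQUIRE_MEM
 * above spans the whole address range, so it acts as a full invalidate
 * (plus TC writeback) of the shader instruction and scalar (K$) caches,
 * TCL1 and TC rather than a sync on one surface.
 */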
7092 
7093 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
7094 					uint32_t pipe, bool enable)
7095 {
7096 	struct amdgpu_device *adev = ring->adev;
7097 	uint32_t val;
7098 	uint32_t wcl_cs_reg;
7099 
7100 	/* the mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
7101 	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
7102 
7103 	switch (pipe) {
7104 	case 0:
7105 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
7106 		break;
7107 	case 1:
7108 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
7109 		break;
7110 	case 2:
7111 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
7112 		break;
7113 	case 3:
7114 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
7115 		break;
7116 	default:
7117 		DRM_DEBUG("invalid pipe %d\n", pipe);
7118 		return;
7119 	}
7120 
7121 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
7122 }
7123 
7124 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
7125 {
7126 	struct amdgpu_device *adev = ring->adev;
7127 	uint32_t val;
7128 	int i;
7129 
7130 	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
7131 	 * limit the number of gfx waves. Setting the low 5 bits (0x1f = 31 out
7132 	 * of the 7-bit maximum of 127) makes sure gfx only gets around 25% of
7133 	 * the gpu resources.
7134 	 */
7135 	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
7136 	amdgpu_ring_emit_wreg(ring,
7137 			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
7138 			      val);
7139 
7140 	/* Restrict waves for normal/low priority compute queues as well
7141 	 * to get the best QoS for high priority compute jobs.
7142 	 *
7143 	 * amdgpu controls only the 1st ME (CS pipes 0-3).
7144 	 */
7145 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
7146 		/* don't throttle the pipe serving the high-priority queue itself */
7147 		if (i != ring->pipe)
7148 			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
7149 	}
7150 }
7151 
7152 static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
7153 {
7154 	/* Header itself is a NOP packet */
7155 	if (num_nop == 1) {
7156 		amdgpu_ring_write(ring, ring->funcs->nop);
7157 		return;
7158 	}
7159 
7160 	/* Max HW optimization is one packet of up to 0x3ffe dwords; any remainder is emitted one NOP at a time */
7161 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
7162 
7163 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
7164 	amdgpu_ring_insert_nop(ring, num_nop - 1);
7165 }
7166 
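/*
 * KCQ reset flow: the KIQ is asked to unmap the hung queue (RESET_QUEUES),
 * the HQD is polled until it deactivates, the MQD is reinitialized, and the
 * KIQ then maps the queue again before the timed-out fence is signalled by
 * the reset helper.
 */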
7167 static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
7168 			      unsigned int vmid,
7169 			      struct amdgpu_fence *timedout_fence)
7170 {
7171 	struct amdgpu_device *adev = ring->adev;
7172 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
7173 	struct amdgpu_ring *kiq_ring = &kiq->ring;
7174 	unsigned long flags;
7175 	int i, r;
7176 
7177 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
7178 		return -EINVAL;
7179 
7180 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
7181 
7182 	spin_lock_irqsave(&kiq->ring_lock, flags);
7183 
7184 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
7185 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7186 		return -ENOMEM;
7187 	}
7188 
7189 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
7190 				   0, 0);
7191 	amdgpu_ring_commit(kiq_ring);
7192 
7193 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7194 
7195 	r = amdgpu_ring_test_ring(kiq_ring);
7196 	if (r)
7197 		return r;
7198 
7199 	/* make sure the dequeue is complete */
7200 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
7201 	mutex_lock(&adev->srbm_mutex);
7202 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
7203 	for (i = 0; i < adev->usec_timeout; i++) {
7204 		if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
7205 			break;
7206 		udelay(1);
7207 	}
7208 	if (i >= adev->usec_timeout)
7209 		r = -ETIMEDOUT;
7210 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7211 	mutex_unlock(&adev->srbm_mutex);
7212 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
7213 	if (r) {
7214 		dev_err(adev->dev, "failed to wait for HQD deactivation\n");
7215 		return r;
7216 	}
7217 
7218 	r = gfx_v9_0_kcq_init_queue(ring, true);
7219 	if (r) {
7220 		dev_err(adev->dev, "failed to init the KCQ\n");
7221 		return r;
7222 	}
7223 	spin_lock_irqsave(&kiq->ring_lock, flags);
7224 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
7225 	if (r) {
7226 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7227 		return -ENOMEM;
7228 	}
7229 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
7230 	amdgpu_ring_commit(kiq_ring);
7231 	r = amdgpu_ring_test_ring(kiq_ring);
7232 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7233 	if (r) {
7234 		DRM_ERROR("failed to remap queue\n");
7235 		return r;
7236 	}
7237 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
7238 }
7239 
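/*
 * gfx_v9_ip_print - print the register state captured by
 * gfx_v9_ip_dump(): first the core GC registers, then the CP registers
 * for every MEC/pipe/queue instance.
 */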
7240 static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
7241 {
7242 	struct amdgpu_device *adev = ip_block->adev;
7243 	uint32_t i, j, k, reg, index = 0;
7244 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7245 
7246 	if (!adev->gfx.ip_dump_core)
7247 		return;
7248 
7249 	for (i = 0; i < reg_count; i++)
7250 		drm_printf(p, "%-50s \t 0x%08x\n",
7251 			   gc_reg_list_9[i].reg_name,
7252 			   adev->gfx.ip_dump_core[i]);
7253 
7254 	/* print compute queue registers for all instances */
7255 	if (!adev->gfx.ip_dump_compute_queues)
7256 		return;
7257 
7258 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7259 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
7260 		   adev->gfx.mec.num_mec,
7261 		   adev->gfx.mec.num_pipe_per_mec,
7262 		   adev->gfx.mec.num_queue_per_pipe);
7263 
7264 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7265 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7266 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7267 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
7268 				for (reg = 0; reg < reg_count; reg++) {
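					/* MEC2 shares MEC1's register list; substitute the
					 * ME2 name for its header-dump entries.
					 */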
7269 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7270 						drm_printf(p, "%-50s \t 0x%08x\n",
7271 							   "mmCP_MEC_ME2_HEADER_DUMP",
7272 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7273 					else
7274 						drm_printf(p, "%-50s \t 0x%08x\n",
7275 							   gc_cp_reg_list_9[reg].reg_name,
7276 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7277 				}
7278 				index += reg_count;
7279 			}
7280 		}
7281 	}
7283 }
7284 
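/*
 * gfx_v9_ip_dump - capture GC register state for debugging (e.g. a
 * devcoredump). GFXOFF is disabled around the reads, and the per-queue
 * CP registers are read under srbm_mutex with the matching
 * me/pipe/queue selected in GRBM.
 */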
7285 static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
7286 {
7287 	struct amdgpu_device *adev = ip_block->adev;
7288 	uint32_t i, j, k, reg, index = 0;
7289 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7290 
7291 	if (!adev->gfx.ip_dump_core || !adev->gfx.num_gfx_rings)
7292 		return;
7293 
7294 	amdgpu_gfx_off_ctrl(adev, false);
7295 	for (i = 0; i < reg_count; i++)
7296 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_9[i]));
7297 	amdgpu_gfx_off_ctrl(adev, true);
7298 
7299 	/* dump compute queue registers for all instances */
7300 	if (!adev->gfx.ip_dump_compute_queues)
7301 		return;
7302 
7303 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7304 	amdgpu_gfx_off_ctrl(adev, false);
7305 	mutex_lock(&adev->srbm_mutex);
7306 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7307 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7308 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7309 				/* ME0 is for GFX so start from 1 for CP */
7310 				soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
7311 
7312 				for (reg = 0; reg < reg_count; reg++) {
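					/* MEC2 shares MEC1's register list; read the
					 * header-dump values from the ME2 register instead.
					 */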
7313 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7314 						adev->gfx.ip_dump_compute_queues[index + reg] =
7315 							RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
7316 					else
7317 						adev->gfx.ip_dump_compute_queues[index + reg] =
7318 							RREG32(SOC15_REG_ENTRY_OFFSET(
7319 								       gc_cp_reg_list_9[reg]));
7320 				}
7321 				index += reg_count;
7322 			}
7323 		}
7324 	}
7325 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7326 	mutex_unlock(&adev->srbm_mutex);
7327 	amdgpu_gfx_off_ctrl(adev, true);
7329 }
7330 
7331 static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
7332 {
7333 	struct amdgpu_device *adev = ring->adev;
7334 
7335 	/* Emit the cleaner shader */
7336 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
7337 		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
7338 	else
7339 		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
7340 
7341 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
7342 }
7343 
7344 static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
7345 {
7346 	struct amdgpu_device *adev = ring->adev;
7347 	struct amdgpu_ip_block *gfx_block =
7348 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7349 
7350 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
7351 
7352 	/* Raven and PCO APUs seem to have stability issues
7353 	 * with compute when gfxoff and gfx pg are enabled.  Disable
7354 	 * gfx pg during submission and re-enable it afterwards.
7355 	 */
7356 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7357 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
7358 }
7359 
7360 static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
7361 {
7362 	struct amdgpu_device *adev = ring->adev;
7363 	struct amdgpu_ip_block *gfx_block =
7364 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7365 
7366 	/* Raven and PCO APUs seem to have stability issues
7367 	 * with compute when gfxoff and gfx pg are enabled.  Disable
7368 	 * gfx pg during submission and re-enable it afterwards.
7369 	 */
7370 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7371 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
7372 
7373 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
7374 }
7375 
7376 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
7377 	.name = "gfx_v9_0",
7378 	.early_init = gfx_v9_0_early_init,
7379 	.late_init = gfx_v9_0_late_init,
7380 	.sw_init = gfx_v9_0_sw_init,
7381 	.sw_fini = gfx_v9_0_sw_fini,
7382 	.hw_init = gfx_v9_0_hw_init,
7383 	.hw_fini = gfx_v9_0_hw_fini,
7384 	.suspend = gfx_v9_0_suspend,
7385 	.resume = gfx_v9_0_resume,
7386 	.is_idle = gfx_v9_0_is_idle,
7387 	.wait_for_idle = gfx_v9_0_wait_for_idle,
7388 	.soft_reset = gfx_v9_0_soft_reset,
7389 	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
7390 	.set_powergating_state = gfx_v9_0_set_powergating_state,
7391 	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
7392 	.dump_ip_state = gfx_v9_ip_dump,
7393 	.print_ip_state = gfx_v9_ip_print,
7394 };
7395 
7396 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
7397 	.type = AMDGPU_RING_TYPE_GFX,
7398 	.align_mask = 0xff,
7399 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7400 	.support_64bit_ptrs = true,
7401 	.secure_submission_supported = true,
7402 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
7403 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
7404 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
7405 	.emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
7406 		5 +  /* COND_EXEC */
7407 		7 +  /* PIPELINE_SYNC */
7408 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7409 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7410 		2 + /* VM_FLUSH */
7411 		8 +  /* FENCE for VM_FLUSH */
7412 		20 + /* GDS switch */
7413 		4 + /* double SWITCH_BUFFER,
7414 		     * the first COND_EXEC jumps to the place just
7415 		     * prior to this double SWITCH_BUFFER
7416 		     */
7416 		5 + /* COND_EXEC */
7417 		7 +	 /*	HDP_flush */
7418 		4 +	 /*	VGT_flush */
7419 		14 + /*	CE_META */
7420 		31 + /*	DE_META */
7421 		3 + /* CNTX_CTRL */
7422 		5 + /* HDP_INVL */
7423 		8 + 8 + /* FENCE x2 */
7424 		2 + /* SWITCH_BUFFER */
7425 		7 + /* gfx_v9_0_emit_mem_sync */
7426 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7427 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7428 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7429 	.emit_fence = gfx_v9_0_ring_emit_fence,
7430 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7431 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7432 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7433 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7434 	.test_ring = gfx_v9_0_ring_test_ring,
7435 	.insert_nop = gfx_v9_ring_insert_nop,
7436 	.pad_ib = amdgpu_ring_generic_pad_ib,
7437 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7438 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7439 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7440 	.preempt_ib = gfx_v9_0_ring_preempt_ib,
7441 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7442 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7443 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7444 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7445 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7446 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7447 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7448 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7449 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7450 };
7451 
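/*
 * Ring functions for the software gfx rings that are multiplexed onto
 * the hardware gfx ring when mid-command-buffer preemption (MCBP) is
 * enabled; see gfx_v9_0_set_ring_funcs().
 */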
7452 static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
7453 	.type = AMDGPU_RING_TYPE_GFX,
7454 	.align_mask = 0xff,
7455 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7456 	.support_64bit_ptrs = true,
7457 	.secure_submission_supported = true,
7458 	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
7459 	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
7460 	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
7461 	.emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
7462 		5 +  /* COND_EXEC */
7463 		7 +  /* PIPELINE_SYNC */
7464 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7465 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7466 		2 + /* VM_FLUSH */
7467 		8 +  /* FENCE for VM_FLUSH */
7468 		20 + /* GDS switch */
7469 		4 + /* double SWITCH_BUFFER,
7470 		     * the first COND_EXEC jumps to the place just
7471 		     * prior to this double SWITCH_BUFFER
7472 		     */
7473 		5 + /* COND_EXEC */
7474 		7 +	 /*	HDP_flush */
7475 		4 +	 /*	VGT_flush */
7476 		14 + /*	CE_META */
7477 		31 + /*	DE_META */
7478 		3 + /* CNTX_CTRL */
7479 		5 + /* HDP_INVL */
7480 		8 + 8 + /* FENCE x2 */
7481 		2 + /* SWITCH_BUFFER */
7482 		7 + /* gfx_v9_0_emit_mem_sync */
7483 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7484 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7485 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7486 	.emit_fence = gfx_v9_0_ring_emit_fence,
7487 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7488 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7489 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7490 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7491 	.test_ring = gfx_v9_0_ring_test_ring,
7492 	.test_ib = gfx_v9_0_ring_test_ib,
7493 	.insert_nop = gfx_v9_ring_insert_nop,
7494 	.pad_ib = amdgpu_ring_generic_pad_ib,
7495 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7496 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7497 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7498 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7499 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7500 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7501 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7502 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7503 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7504 	.patch_cntl = gfx_v9_0_ring_patch_cntl,
7505 	.patch_de = gfx_v9_0_ring_patch_de_meta,
7506 	.patch_ce = gfx_v9_0_ring_patch_ce_meta,
7507 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7508 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7509 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7510 };
7511 
7512 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7513 	.type = AMDGPU_RING_TYPE_COMPUTE,
7514 	.align_mask = 0xff,
7515 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7516 	.support_64bit_ptrs = true,
7517 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7518 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7519 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7520 	.emit_frame_size =
7521 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7522 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7523 		5 + /* hdp invalidate */
7524 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7525 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7526 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7527 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7528 		7 + /* gfx_v9_0_emit_mem_sync */
7529 		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
7530 		15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
7531 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7532 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7533 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
7534 	.emit_fence = gfx_v9_0_ring_emit_fence,
7535 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7536 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7537 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7538 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7539 	.test_ring = gfx_v9_0_ring_test_ring,
7540 	.test_ib = gfx_v9_0_ring_test_ib,
7541 	.insert_nop = gfx_v9_ring_insert_nop,
7542 	.pad_ib = amdgpu_ring_generic_pad_ib,
7543 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7544 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7545 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7546 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7547 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7548 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
7549 	.reset = gfx_v9_0_reset_kcq,
7550 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7551 	.begin_use = gfx_v9_0_ring_begin_use_compute,
7552 	.end_use = gfx_v9_0_ring_end_use_compute,
7553 };
7554 
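/* The KIQ ring is only used for register access and queue management
 * packets, never for user IBs, so it implements a reduced set of
 * callbacks.
 */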
7555 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7556 	.type = AMDGPU_RING_TYPE_KIQ,
7557 	.align_mask = 0xff,
7558 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7559 	.support_64bit_ptrs = true,
7560 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7561 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7562 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7563 	.emit_frame_size =
7564 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7565 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7566 		5 + /* hdp invalidate */
7567 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7568 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7569 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7570 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7571 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7572 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
7573 	.test_ring = gfx_v9_0_ring_test_ring,
7574 	.insert_nop = amdgpu_ring_insert_nop,
7575 	.pad_ib = amdgpu_ring_generic_pad_ib,
7576 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
7577 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7578 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7579 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7580 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7581 };
7582 
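/* Hook up the ring function tables: KIQ, hardware gfx rings, software
 * gfx rings (only when MCBP is enabled) and compute rings.
 */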
7583 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
7584 {
7585 	int i;
7586 
7587 	adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
7588 
7589 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7590 		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
7591 
7592 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
7593 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
7594 			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
7595 	}
7596 
7597 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7598 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
7599 }
7600 
7601 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
7602 	.set = gfx_v9_0_set_eop_interrupt_state,
7603 	.process = gfx_v9_0_eop_irq,
7604 };
7605 
7606 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
7607 	.set = gfx_v9_0_set_priv_reg_fault_state,
7608 	.process = gfx_v9_0_priv_reg_irq,
7609 };
7610 
7611 static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
7612 	.set = gfx_v9_0_set_bad_op_fault_state,
7613 	.process = gfx_v9_0_bad_op_irq,
7614 };
7615 
7616 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
7617 	.set = gfx_v9_0_set_priv_inst_fault_state,
7618 	.process = gfx_v9_0_priv_inst_irq,
7619 };
7620 
7621 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
7622 	.set = gfx_v9_0_set_cp_ecc_error_state,
7623 	.process = amdgpu_gfx_cp_ecc_error_irq,
7624 };
7625 
7627 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
7628 {
7629 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7630 	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
7631 
7632 	adev->gfx.priv_reg_irq.num_types = 1;
7633 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
7634 
7635 	adev->gfx.bad_op_irq.num_types = 1;
7636 	adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
7637 
7638 	adev->gfx.priv_inst_irq.num_types = 1;
7639 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
7640 
7641 	adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
7642 	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
7643 }
7644 
7645 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
7646 {
7647 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7648 	case IP_VERSION(9, 0, 1):
7649 	case IP_VERSION(9, 2, 1):
7650 	case IP_VERSION(9, 4, 0):
7651 	case IP_VERSION(9, 2, 2):
7652 	case IP_VERSION(9, 1, 0):
7653 	case IP_VERSION(9, 4, 1):
7654 	case IP_VERSION(9, 3, 0):
7655 	case IP_VERSION(9, 4, 2):
7656 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7657 		break;
7658 	default:
7659 		break;
7660 	}
7661 }
7662 
7663 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7664 {
7665 	/* init ASIC GDS info */
7666 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7667 	case IP_VERSION(9, 0, 1):
7668 	case IP_VERSION(9, 2, 1):
7669 	case IP_VERSION(9, 4, 0):
7670 		adev->gds.gds_size = 0x10000;
7671 		break;
7672 	case IP_VERSION(9, 2, 2):
7673 	case IP_VERSION(9, 1, 0):
7674 	case IP_VERSION(9, 4, 1):
7675 		adev->gds.gds_size = 0x1000;
7676 		break;
7677 	case IP_VERSION(9, 4, 2):
7678 		/* aldebaran removed all of the GDS internal memory;
7679 		 * the kernel only supports GWS opcodes such as
7680 		 * barrier and semaphore.
7681 		 */
7681 		adev->gds.gds_size = 0;
7682 		break;
7683 	default:
7684 		adev->gds.gds_size = 0x10000;
7685 		break;
7686 	}
7687 
7688 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7689 	case IP_VERSION(9, 0, 1):
7690 	case IP_VERSION(9, 4, 0):
7691 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7692 		break;
7693 	case IP_VERSION(9, 2, 1):
7694 		adev->gds.gds_compute_max_wave_id = 0x27f;
7695 		break;
7696 	case IP_VERSION(9, 2, 2):
7697 	case IP_VERSION(9, 1, 0):
7698 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7699 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7700 		else
7701 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7702 		break;
7703 	case IP_VERSION(9, 4, 1):
7704 		adev->gds.gds_compute_max_wave_id = 0xfff;
7705 		break;
7706 	case IP_VERSION(9, 4, 2):
7707 		/* deprecated for Aldebaran, no usage at all */
7708 		adev->gds.gds_compute_max_wave_id = 0;
7709 		break;
7710 	default:
7711 		/* this really depends on the chip */
7712 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7713 		break;
7714 	}
7715 
7716 	adev->gds.gws_size = 64;
7717 	adev->gds.oa_size = 16;
7718 }
7719 
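/*
 * gfx_v9_0_set_user_cu_inactive_bitmap - apply a user-requested CU
 * disable mask to the currently selected SE/SH via
 * GC_USER_SHADER_ARRAY_CONFIG.
 */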
7720 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7721 						 u32 bitmap)
7722 {
7723 	u32 data;
7724 
7725 	if (!bitmap)
7726 		return;
7727 
7728 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7729 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7730 
7731 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7732 }
7733 
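/*
 * gfx_v9_0_get_cu_active_bitmap - return the bitmap of active CUs in
 * the currently selected SE/SH, i.e. CUs that are neither fused off
 * (CC_GC_SHADER_ARRAY_CONFIG) nor user-disabled
 * (GC_USER_SHADER_ARRAY_CONFIG).
 */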
7734 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7735 {
7736 	u32 data, mask;
7737 
7738 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7739 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7740 
7741 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7742 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7743 
7744 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7745 
7746 	return (~data) & mask;
7747 }
7748 
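/*
 * gfx_v9_0_get_cu_info - walk every SE/SH, apply the user CU disable
 * masks and fill @cu_info with the per-SH active and always-on CU
 * bitmaps plus the total active CU count.
 */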
7749 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7750 				 struct amdgpu_cu_info *cu_info)
7751 {
7752 	int i, j, k, counter, active_cu_number = 0;
7753 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7754 	unsigned int disable_masks[4 * 4];
7755 
7756 	if (!adev || !cu_info)
7757 		return -EINVAL;
7758 
7759 	/*
7760 	 * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
7761 	 */
7762 	if (adev->gfx.config.max_shader_engines *
7763 		adev->gfx.config.max_sh_per_se > 16)
7764 		return -EINVAL;
7765 
7766 	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
7767 				    adev->gfx.config.max_shader_engines,
7768 				    adev->gfx.config.max_sh_per_se);
7769 
7770 	mutex_lock(&adev->grbm_idx_mutex);
7771 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7772 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7773 			mask = 1;
7774 			ao_bitmap = 0;
7775 			counter = 0;
7776 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
7777 			gfx_v9_0_set_user_cu_inactive_bitmap(
7778 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7779 			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7780 
7781 			/*
7782 			 * The bitmap (and ao_cu_bitmap) in the cu_info structure
7783 			 * is a 4x4 array, which is usually suitable for Vega
7784 			 * ASICs with their 4*2 SE/SH layout.
7785 			 * But for Arcturus, the SE/SH layout changed to 8*1.
7786 			 * To minimize the impact, we make it compatible
7787 			 * with the current bitmap array as below:
7788 			 *    SE4,SH0 --> bitmap[0][1]
7789 			 *    SE5,SH0 --> bitmap[1][1]
7790 			 *    SE6,SH0 --> bitmap[2][1]
7791 			 *    SE7,SH0 --> bitmap[3][1]
7792 			 */
7793 			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
7794 
7795 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7796 				if (bitmap & mask) {
7797 					if (counter < adev->gfx.config.max_cu_per_sh)
7798 						ao_bitmap |= mask;
7799 					counter++;
7800 				}
7801 				mask <<= 1;
7802 			}
7803 			active_cu_number += counter;
7804 			if (i < 2 && j < 2)
7805 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7806 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7807 		}
7808 	}
7809 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7810 	mutex_unlock(&adev->grbm_idx_mutex);
7811 
7812 	cu_info->number = active_cu_number;
7813 	cu_info->ao_cu_mask = ao_cu_mask;
7814 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7815 
7816 	return 0;
7817 }
7818 
7819 const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
7821 	.type = AMD_IP_BLOCK_TYPE_GFX,
7822 	.major = 9,
7823 	.minor = 0,
7824 	.rev = 0,
7825 	.funcs = &gfx_v9_0_ip_funcs,
7826 };
7827