xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (revision 00e08fb2e7ce88e2ae366cbc79997d71d014b0ac)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_NUM_SW_GFX_RINGS  2
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

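/*
 * MODULE_FIRMWARE() does not load anything by itself; it only records the
 * firmware file name in the module's metadata so that userspace tooling
 * (e.g. initramfs generators) knows to bundle these blobs alongside the
 * driver.  The actual loading happens later via request_firmware().
 */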
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");

#define mmTCP_CHAN_STEER_0_ARCT								0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_1_ARCT								0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_2_ARCT								0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_3_ARCT								0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_4_ARCT								0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX						0
#define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX						0

#define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1

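/*
 * Register list used when dumping GC state, e.g. for a devcoredump after a
 * GPU hang.  Note that mmCP_CE_HEADER_DUMP, mmCP_PFP_HEADER_DUMP and
 * mmCP_ME_HEADER_DUMP appear eight times each on purpose: successive reads
 * of a *_HEADER_DUMP register are understood to return successive entries
 * of that microengine's packet-header FIFO, so eight reads capture the
 * last eight headers it processed.
 */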
static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
	/* packet headers */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};

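/*
 * Per-queue HQD registers for the compute queue dump.  These registers are
 * banked per pipe/queue, so the dump code is expected to select the target
 * queue (via soc15_grbm_select()) before reading them, once per active
 * queue instance.
 */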
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};

enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

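/*
 * Packs the per-subblock error-type capability bits.  Reading the macro:
 * a..d are OR'd into hw_supported_error_type at bit positions 0..3, while
 * e..h land in sw_supported_error_type in a shuffled order (g -> bit 0,
 * e -> bit 1, h -> bit 2, f -> bit 3).  The entry is placed at the
 * matching AMDGPU_RAS_BLOCK__ index via a designated initializer.  As an
 * illustrative expansion,
 *
 *	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1)
 *
 * becomes
 *
 *	[AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH] = {
 *		"GFX_CPC_SCRATCH", TA_RAS_BLOCK__GFX_CPC_SCRATCH, 0xe, 0x6,
 *	}
 */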
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                 \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

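/*
 * "Golden" register settings: each SOC15_REG_GOLDEN_VALUE() entry carries a
 * register plus an and_mask/or_mask pair.  soc15_program_register_sequence()
 * applies them read-modify-write, roughly:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= (or_mask & and_mask);
 *	WREG32(reg, tmp);
 *
 * (with a plain write when and_mask is 0xffffffff), so only the masked bits
 * are touched.
 */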
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

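/*
 * Offsets of the RLC SRM index control register pairs relative to the _0
 * instance, so callers can address register N as
 * mmRLC_SRM_INDEX_CNTL_ADDR_0 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[N].
 * The instances are presumably not guaranteed to be contiguous, hence a
 * lookup table rather than a simple "+ N".
 */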
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
					      unsigned int vmid);
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

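/*
 * KIQ (Kernel Interface Queue) helpers.  The KIQ is a privileged compute
 * queue through which the driver submits PM4 packets that manage the other
 * queues (map/unmap, status queries, TLB invalidation).  Each helper below
 * emits one such packet; its dword count must match the corresponding
 * *_size field in gfx_v9_0_kiq_pm4_funcs.
 */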
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask: 0, queue_type: 0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

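/*
 * Emit a MAP_QUEUES packet: points the CP at the queue's MQD (memory queue
 * descriptor) and write-pointer polling address, and binds the queue to a
 * doorbell.  eng_sel is 4 for a gfx ring and 0 for compute.
 */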
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);

	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

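/*
 * Emit a QUERY_STATUS packet.  With INTERRUPT_SEL(0)/COMMAND(2) the CP is
 * expected to write the 64-bit seq value to addr once the queue status has
 * been collected, so the caller can fence on that memory location.
 */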
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* doorbell offset and engine select of the queue being queried */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}


static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue request takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for HQD to deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}

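/*
 * Program the per-ASIC golden settings, selected by GC IP version.  The
 * common gc_9_x settings are applied last for everything except Arcturus
 * (9.4.1) and Aldebaran (9.4.2), which have their own complete sequences;
 * Renoir (9.3.0) returns early for the same reason.
 */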
1096 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
1097 {
1098 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1099 	case IP_VERSION(9, 0, 1):
1100 		soc15_program_register_sequence(adev,
1101 						golden_settings_gc_9_0,
1102 						ARRAY_SIZE(golden_settings_gc_9_0));
1103 		soc15_program_register_sequence(adev,
1104 						golden_settings_gc_9_0_vg10,
1105 						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
1106 		break;
1107 	case IP_VERSION(9, 2, 1):
1108 		soc15_program_register_sequence(adev,
1109 						golden_settings_gc_9_2_1,
1110 						ARRAY_SIZE(golden_settings_gc_9_2_1));
1111 		soc15_program_register_sequence(adev,
1112 						golden_settings_gc_9_2_1_vg12,
1113 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
1114 		break;
1115 	case IP_VERSION(9, 4, 0):
1116 		soc15_program_register_sequence(adev,
1117 						golden_settings_gc_9_0,
1118 						ARRAY_SIZE(golden_settings_gc_9_0));
1119 		soc15_program_register_sequence(adev,
1120 						golden_settings_gc_9_0_vg20,
1121 						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
1122 		break;
1123 	case IP_VERSION(9, 4, 1):
1124 		soc15_program_register_sequence(adev,
1125 						golden_settings_gc_9_4_1_arct,
1126 						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
1127 		break;
1128 	case IP_VERSION(9, 2, 2):
1129 	case IP_VERSION(9, 1, 0):
1130 		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
1131 						ARRAY_SIZE(golden_settings_gc_9_1));
1132 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1133 			soc15_program_register_sequence(adev,
1134 							golden_settings_gc_9_1_rv2,
1135 							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
1136 		else
1137 			soc15_program_register_sequence(adev,
1138 							golden_settings_gc_9_1_rv1,
1139 							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
1140 		break;
1141 	case IP_VERSION(9, 3, 0):
1142 		soc15_program_register_sequence(adev,
1143 						golden_settings_gc_9_1_rn,
1144 						ARRAY_SIZE(golden_settings_gc_9_1_rn));
1145 		return; /* Renoir does not need the common golden settings */
1146 	case IP_VERSION(9, 4, 2):
1147 		gfx_v9_4_2_init_golden_registers(adev,
1148 						 adev->smuio.funcs->get_die_id(adev));
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1155 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))
1156 		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
1157 						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
1158 }
1159 
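/*
 * Emit a WRITE_DATA packet that writes @val to the register at offset
 * @reg, optionally requesting write confirmation (@wc).
 */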
1160 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1161 				       bool wc, uint32_t reg, uint32_t val)
1162 {
1163 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1164 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1165 				WRITE_DATA_DST_SEL(0) |
1166 				(wc ? WR_CONFIRM : 0));
1167 	amdgpu_ring_write(ring, reg);
1168 	amdgpu_ring_write(ring, 0);
1169 	amdgpu_ring_write(ring, val);
1170 }
1171 
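/*
 * Emit a WAIT_REG_MEM packet: stall the selected engine until the dword
 * at a register (@mem_space == 0) or memory address (@mem_space == 1),
 * masked with @mask, equals @ref, re-polling every @inv interval.
 */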
1172 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1173 				  int mem_space, int opt, uint32_t addr0,
1174 				  uint32_t addr1, uint32_t ref, uint32_t mask,
1175 				  uint32_t inv)
1176 {
1177 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1178 	amdgpu_ring_write(ring,
1179 				 /* memory (1) or register (0) */
1180 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1181 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
1182 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1183 				 WAIT_REG_MEM_ENGINE(eng_sel)));
1184 
1185 	if (mem_space)
1186 		BUG_ON(addr0 & 0x3); /* address must be dword aligned */
1187 	amdgpu_ring_write(ring, addr0);
1188 	amdgpu_ring_write(ring, addr1);
1189 	amdgpu_ring_write(ring, ref);
1190 	amdgpu_ring_write(ring, mask);
1191 	amdgpu_ring_write(ring, inv); /* poll interval */
1192 }
1193 
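/*
 * Basic ring sanity test: seed SCRATCH_REG0 with 0xCAFEDEAD, emit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, and poll the
 * register until the new value lands or the timeout expires.
 */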
1194 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1195 {
1196 	struct amdgpu_device *adev = ring->adev;
1197 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1198 	uint32_t tmp = 0;
1199 	unsigned i;
1200 	int r;
1201 
1202 	WREG32(scratch, 0xCAFEDEAD);
1203 	r = amdgpu_ring_alloc(ring, 3);
1204 	if (r)
1205 		return r;
1206 
1207 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1208 	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
1209 	amdgpu_ring_write(ring, 0xDEADBEEF);
1210 	amdgpu_ring_commit(ring);
1211 
1212 	for (i = 0; i < adev->usec_timeout; i++) {
1213 		tmp = RREG32(scratch);
1214 		if (tmp == 0xDEADBEEF)
1215 			break;
1216 		udelay(1);
1217 	}
1218 
1219 	if (i >= adev->usec_timeout)
1220 		r = -ETIMEDOUT;
1221 	return r;
1222 }
1223 
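/*
 * Indirect buffer test: allocate a writeback slot seeded with
 * 0xCAFEDEAD, submit a small IB containing a WRITE_DATA packet that
 * stores 0xDEADBEEF to it, then wait on the fence and verify the
 * value arrived.
 */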
1224 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1225 {
1226 	struct amdgpu_device *adev = ring->adev;
1227 	struct amdgpu_ib ib;
1228 	struct dma_fence *f = NULL;
1229 
1230 	unsigned index;
1231 	uint64_t gpu_addr;
1232 	uint32_t tmp;
1233 	long r;
1234 
1235 	r = amdgpu_device_wb_get(adev, &index);
1236 	if (r)
1237 		return r;
1238 
1239 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1240 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1241 	memset(&ib, 0, sizeof(ib));
1242 
1243 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
1244 	if (r)
1245 		goto err1;
1246 
1247 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1248 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1249 	ib.ptr[2] = lower_32_bits(gpu_addr);
1250 	ib.ptr[3] = upper_32_bits(gpu_addr);
1251 	ib.ptr[4] = 0xDEADBEEF;
1252 	ib.length_dw = 5;
1253 
1254 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1255 	if (r)
1256 		goto err2;
1257 
1258 	r = dma_fence_wait_timeout(f, false, timeout);
1259 	if (r == 0) {
1260 		r = -ETIMEDOUT;
1261 		goto err2;
1262 	} else if (r < 0) {
1263 		goto err2;
1264 	}
1265 
1266 	tmp = adev->wb.wb[index];
1267 	if (tmp == 0xDEADBEEF)
1268 		r = 0;
1269 	else
1270 		r = -EINVAL;
1271 
1272 err2:
1273 	amdgpu_ib_free(&ib, NULL);
1274 	dma_fence_put(f);
1275 err1:
1276 	amdgpu_device_wb_free(adev, index);
1277 	return r;
1278 }
1279 
1281 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1282 {
1283 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
1284 	amdgpu_ucode_release(&adev->gfx.me_fw);
1285 	amdgpu_ucode_release(&adev->gfx.ce_fw);
1286 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
1287 	amdgpu_ucode_release(&adev->gfx.mec_fw);
1288 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
1289 
1290 	kfree(adev->gfx.rlc.register_list_format);
1291 }
1292 
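/*
 * Record whether the loaded ME/MEC firmware is new enough to support
 * register write-and-wait from the ring (me_fw_write_wait /
 * mec_fw_write_wait), and warn once when the CP firmware predates the
 * required feature level.
 */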
1293 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1294 {
1295 	adev->gfx.me_fw_write_wait = false;
1296 	adev->gfx.mec_fw_write_wait = false;
1297 
1298 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1299 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
1300 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
1301 	     (adev->gfx.mec_feature_version < 46) ||
1302 	     (adev->gfx.pfp_fw_version < 0x000000b7) ||
1303 	     (adev->gfx.pfp_feature_version < 46)))
1304 		DRM_WARN_ONCE("CP firmware version too old, please update!");
1305 
1306 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1307 	case IP_VERSION(9, 0, 1):
1308 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1309 		    (adev->gfx.me_feature_version >= 42) &&
1310 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1311 		    (adev->gfx.pfp_feature_version >= 42))
1312 			adev->gfx.me_fw_write_wait = true;
1313 
1314 		if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1315 		    (adev->gfx.mec_feature_version >= 42))
1316 			adev->gfx.mec_fw_write_wait = true;
1317 		break;
1318 	case IP_VERSION(9, 2, 1):
1319 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1320 		    (adev->gfx.me_feature_version >= 44) &&
1321 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1322 		    (adev->gfx.pfp_feature_version >= 44))
1323 			adev->gfx.me_fw_write_wait = true;
1324 
1325 		if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1326 		    (adev->gfx.mec_feature_version >= 44))
1327 			adev->gfx.mec_fw_write_wait = true;
1328 		break;
1329 	case IP_VERSION(9, 4, 0):
1330 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1331 		    (adev->gfx.me_feature_version >= 44) &&
1332 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1333 		    (adev->gfx.pfp_feature_version >= 44))
1334 			adev->gfx.me_fw_write_wait = true;
1335 
1336 		if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1337 		    (adev->gfx.mec_feature_version >= 44))
1338 			adev->gfx.mec_fw_write_wait = true;
1339 		break;
1340 	case IP_VERSION(9, 1, 0):
1341 	case IP_VERSION(9, 2, 2):
1342 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1343 		    (adev->gfx.me_feature_version >= 42) &&
1344 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1345 		    (adev->gfx.pfp_feature_version >= 42))
1346 			adev->gfx.me_fw_write_wait = true;
1347 
1348 		if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1349 		    (adev->gfx.mec_feature_version >= 42))
1350 			adev->gfx.mec_fw_write_wait = true;
1351 		break;
1352 	default:
1353 		adev->gfx.me_fw_write_wait = true;
1354 		adev->gfx.mec_fw_write_wait = true;
1355 		break;
1356 	}
1357 }
1358 
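/*
 * Quirk table keyed on PCI vendor/device/subsystem IDs and revision for
 * boards where GFXOFF is known to be unstable; matches are used by
 * gfx_v9_0_check_if_need_gfxoff() to force GFXOFF off.
 */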
1359 struct amdgpu_gfxoff_quirk {
1360 	u16 chip_vendor;
1361 	u16 chip_device;
1362 	u16 subsys_vendor;
1363 	u16 subsys_device;
1364 	u8 revision;
1365 };
1366 
1367 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1368 	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1369 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1370 	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1371 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1372 	/* GFXOFF is unstable on C6 parts with VBIOS 113-RAVEN-114 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1374 	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
1375 	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
1376 	/* https://bbs.openkylin.top/t/topic/171497 */
1377 	{ 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
1378 	/* HP 705G4 DM with R5 2400G */
1379 	{ 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
1380 	{ 0, 0, 0, 0, 0 },
1381 };
1382 
1383 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1384 {
1385 	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1386 
1387 	while (p && p->chip_device != 0) {
1388 		if (pdev->vendor == p->chip_vendor &&
1389 		    pdev->device == p->chip_device &&
1390 		    pdev->subsystem_vendor == p->subsys_vendor &&
1391 		    pdev->subsystem_device == p->subsys_device &&
1392 		    pdev->revision == p->revision) {
1393 			return true;
1394 		}
1395 		++p;
1396 	}
1397 	return false;
1398 }
1399 
1400 static bool is_raven_kicker(struct amdgpu_device *adev)
1401 {
1402 	/* kicker parts ship SMU firmware 0x41e2b or newer */
1403 	return adev->pm.fw_version >= 0x41e2b;
1406 }
1407 
1408 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1409 {
1410 	return (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&
1411 	       (adev->gfx.me_fw_version >= 0x000000a5) &&
1412 	       (adev->gfx.me_feature_version >= 52);
1416 }
1417 
1418 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1419 {
1420 	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1421 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1422 
1423 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1424 	case IP_VERSION(9, 0, 1):
1425 	case IP_VERSION(9, 2, 1):
1426 	case IP_VERSION(9, 4, 0):
1427 		break;
1428 	case IP_VERSION(9, 2, 2):
1429 	case IP_VERSION(9, 1, 0):
1430 		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1431 		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1432 		    ((!is_raven_kicker(adev) &&
1433 		      adev->gfx.rlc_fw_version < 531) ||
1434 		     (adev->gfx.rlc_feature_version < 1) ||
1435 		     !adev->gfx.rlc.is_rlc_v2_1))
1436 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1437 
1438 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1439 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1440 				AMD_PG_SUPPORT_CP |
1441 				AMD_PG_SUPPORT_RLC_SMU_HS;
1442 		break;
1443 	case IP_VERSION(9, 3, 0):
1444 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1445 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1446 				AMD_PG_SUPPORT_CP |
1447 				AMD_PG_SUPPORT_RLC_SMU_HS;
1448 		break;
1449 	default:
1450 		break;
1451 	}
1452 }
1453 
1454 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1455 					  char *chip_name)
1456 {
1457 	int err;
1458 
1459 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
1460 				   AMDGPU_UCODE_REQUIRED,
1461 				   "amdgpu/%s_pfp.bin", chip_name);
1462 	if (err)
1463 		goto out;
1464 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
1465 
1466 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1467 				   AMDGPU_UCODE_REQUIRED,
1468 				   "amdgpu/%s_me.bin", chip_name);
1469 	if (err)
1470 		goto out;
1471 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
1472 
1473 	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1474 				   AMDGPU_UCODE_REQUIRED,
1475 				   "amdgpu/%s_ce.bin", chip_name);
1476 	if (err)
1477 		goto out;
1478 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
1479 
1480 out:
1481 	if (err) {
1482 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
1483 		amdgpu_ucode_release(&adev->gfx.me_fw);
1484 		amdgpu_ucode_release(&adev->gfx.ce_fw);
1485 	}
1486 	return err;
1487 }
1488 
1489 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1490 				       char *chip_name)
1491 {
1492 	int err;
1493 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1494 	uint16_t version_major;
1495 	uint16_t version_minor;
1496 	uint32_t smu_version;
1497 
1498 	/*
1499 	 * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
1500 	 * instead of picasso_rlc.bin.
1501 	 * Detection method:
1502 	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1503 	 *          or revision >= 0xD8 && revision <= 0xDF
1504 	 * otherwise the part is PCO FP5
1505 	 */
1506 	if (!strcmp(chip_name, "picasso") &&
1507 		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1508 		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1509 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1510 					   AMDGPU_UCODE_REQUIRED,
1511 					   "amdgpu/%s_rlc_am4.bin", chip_name);
1512 	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1513 		(smu_version >= 0x41e2b))
1514 		/* SMC is loaded by the SBIOS on APUs, so the SMU
1515 		 * version can be read directly.
1516 		 */
1517 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1518 					   AMDGPU_UCODE_REQUIRED,
1519 					   "amdgpu/%s_kicker_rlc.bin", chip_name);
1520 	else
1521 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1522 					   AMDGPU_UCODE_REQUIRED,
1523 					   "amdgpu/%s_rlc.bin", chip_name);
1524 	if (err)
1525 		goto out;
1526 
1527 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1528 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1529 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1530 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
1531 out:
1532 	if (err)
1533 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
1534 
1535 	return err;
1536 }
1537 
1538 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1539 {
1540 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
1541 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1542 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))
1543 		return false;
1544 
1545 	return true;
1546 }
1547 
1548 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1549 					      char *chip_name)
1550 {
1551 	int err;
1552 
1553 	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1554 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1555 				   AMDGPU_UCODE_REQUIRED,
1556 				   "amdgpu/%s_sjt_mec.bin", chip_name);
1557 	else
1558 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1559 					   AMDGPU_UCODE_REQUIRED,
1560 					   "amdgpu/%s_mec.bin", chip_name);
1561 	if (err)
1562 		goto out;
1563 
1564 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
1565 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
1566 
1567 	if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1568 		if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1569 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1570 						   AMDGPU_UCODE_REQUIRED,
1571 						   "amdgpu/%s_sjt_mec2.bin", chip_name);
1572 		else
1573 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1574 						   AMDGPU_UCODE_REQUIRED,
1575 						   "amdgpu/%s_mec2.bin", chip_name);
1576 		if (!err) {
1577 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
1578 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
1579 		} else {
1580 			err = 0;
1581 			amdgpu_ucode_release(&adev->gfx.mec2_fw);
1582 		}
1583 	} else {
1584 		adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1585 		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1586 	}
1587 
1588 	gfx_v9_0_check_if_need_gfxoff(adev);
1589 	gfx_v9_0_check_fw_write_wait(adev);
1590 
1591 out:
1592 	if (err)
1593 		amdgpu_ucode_release(&adev->gfx.mec_fw);
1594 	return err;
1595 }
1596 
1597 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1598 {
1599 	char ucode_prefix[30];
1600 	int r;
1601 
1602 	DRM_DEBUG("\n");
1603 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
1604 
1605 	/* No CPG in Arcturus */
1606 	if (adev->gfx.num_gfx_rings) {
1607 		r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
1608 		if (r)
1609 			return r;
1610 	}
1611 
1612 	r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
1613 	if (r)
1614 		return r;
1615 
1616 	r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
1617 	if (r)
1618 		return r;
1619 
1620 	return 0;
1621 }
1622 
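/*
 * Size of the clear-state buffer in dwords: preamble (begin packet plus
 * context control), a two-dword header plus payload per SECT_CONTEXT
 * extent, and the trailing end-of-clear-state and clear-state packets.
 */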
1623 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1624 {
1625 	u32 count = 0;
1626 	const struct cs_section_def *sect = NULL;
1627 	const struct cs_extent_def *ext = NULL;
1628 
1629 	/* begin clear state */
1630 	count += 2;
1631 	/* context control state */
1632 	count += 3;
1633 
1634 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1635 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1636 			if (sect->id == SECT_CONTEXT)
1637 				count += 2 + ext->reg_count;
1638 			else
1639 				return 0;
1640 		}
1641 	}
1642 
1643 	/* end clear state */
1644 	count += 2;
1645 	/* clear state */
1646 	count += 2;
1647 
1648 	return count;
1649 }
1650 
1651 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
1652 {
1653 	u32 count = 0;
1654 
1655 	if (adev->gfx.rlc.cs_data == NULL)
1656 		return;
1657 	if (buffer == NULL)
1658 		return;
1659 
1660 	count = amdgpu_gfx_csb_preamble_start(buffer);
1661 	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
1662 	amdgpu_gfx_csb_preamble_end(buffer, count);
1663 }
1664 
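/*
 * Program the per-SE/SH masks of CUs that stay powered on: the first
 * pg_always_on_cu_num CUs are kept out of powergating
 * (RLC_PG_ALWAYS_ON_CU_MASK) and the first always_on_cu_num (4 on APUs,
 * 8 on GC 9.2.1, 12 otherwise) are kept out of RLC load balancing
 * (RLC_LB_ALWAYS_ACTIVE_CU_MASK).
 */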
1665 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1666 {
1667 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1668 	uint32_t pg_always_on_cu_num = 2;
1669 	uint32_t always_on_cu_num;
1670 	uint32_t i, j, k;
1671 	uint32_t mask, cu_bitmap, counter;
1672 
1673 	if (adev->flags & AMD_IS_APU)
1674 		always_on_cu_num = 4;
1675 	else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))
1676 		always_on_cu_num = 8;
1677 	else
1678 		always_on_cu_num = 12;
1679 
1680 	mutex_lock(&adev->grbm_idx_mutex);
1681 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1682 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1683 			mask = 1;
1684 			cu_bitmap = 0;
1685 			counter = 0;
1686 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
1687 
1688 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1689 				if (cu_info->bitmap[0][i][j] & mask) {
1690 					if (counter == pg_always_on_cu_num)
1691 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1692 					if (counter < always_on_cu_num)
1693 						cu_bitmap |= mask;
1694 					else
1695 						break;
1696 					counter++;
1697 				}
1698 				mask <<= 1;
1699 			}
1700 
1701 			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1702 			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1703 		}
1704 	}
1705 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1706 	mutex_unlock(&adev->grbm_idx_mutex);
1707 }
1708 
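/*
 * Configure RLC Load Balancing Per Watt (LBPW) for Raven-class parts:
 * program the load-balance thresholds and counters, broadcast the init
 * CU mask to every SE/SH, and set the sampling parameters before
 * handing the always-on CU masks to gfx_v9_0_init_always_on_cu_mask().
 * gfx_v9_4_init_lbpw() below is the same sequence with different
 * threshold values.
 */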
1709 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1710 {
1711 	uint32_t data;
1712 
1713 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1714 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1715 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1716 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1717 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1718 
1719 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1720 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1721 
1722 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1723 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1724 
1725 	mutex_lock(&adev->grbm_idx_mutex);
1726 	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1727 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1728 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1729 
1730 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1731 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1732 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1733 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1734 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1735 
1736 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1737 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1738 	data &= 0x0000FFFF;
1739 	data |= 0x00C00000;
1740 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1741 
1742 	/*
1743 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1744 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1745 	 */
1746 
1747 	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1748 	 * but is used here for RLC_LB_CNTL configuration */
1749 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1750 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1751 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1752 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1753 	mutex_unlock(&adev->grbm_idx_mutex);
1754 
1755 	gfx_v9_0_init_always_on_cu_mask(adev);
1756 }
1757 
1758 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1759 {
1760 	uint32_t data;
1761 
1762 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1763 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1764 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1765 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1766 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1767 
1768 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1769 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1770 
1771 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1772 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1773 
1774 	mutex_lock(&adev->grbm_idx_mutex);
1775 	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1776 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1777 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1778 
1779 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1780 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1781 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1782 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1783 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1784 
1785 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1786 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1787 	data &= 0x0000FFFF;
1788 	data |= 0x00C00000;
1789 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1790 
1791 	/*
1792 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1793 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1794 	 */
1795 
1796 	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1797 	 * but is used here for RLC_LB_CNTL configuration */
1798 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1799 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1800 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1801 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1802 	mutex_unlock(&adev->grbm_idx_mutex);
1803 
1804 	gfx_v9_0_init_always_on_cu_mask(adev);
1805 }
1806 
1807 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1808 {
1809 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1810 }
1811 
1812 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1813 {
1814 	if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1815 		return 5;
1816 	else
1817 		return 4;
1818 }
1819 
1820 static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1821 {
1822 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1823 
1824 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
1825 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1826 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
1827 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
1828 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
1829 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
1830 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
1831 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
1832 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1833 }
1834 
1835 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1836 {
1837 	const struct cs_section_def *cs_data;
1838 	int r;
1839 
1840 	adev->gfx.rlc.cs_data = gfx9_cs_data;
1841 
1842 	cs_data = adev->gfx.rlc.cs_data;
1843 
1844 	if (cs_data) {
1845 		/* init clear state block */
1846 		r = amdgpu_gfx_rlc_init_csb(adev);
1847 		if (r)
1848 			return r;
1849 	}
1850 
1851 	if (adev->flags & AMD_IS_APU) {
1852 		/* TODO: double check the cp_table_size for RV */
1853 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1854 		r = amdgpu_gfx_rlc_init_cpt(adev);
1855 		if (r)
1856 			return r;
1857 	}
1858 
1859 	return 0;
1860 }
1861 
1862 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1863 {
1864 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1865 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1866 }
1867 
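/*
 * Allocate the MEC bring-up resources: a zeroed BO holding the HPD EOP
 * area for all acquired compute queues, and a GTT BO carrying a copy of
 * the MEC firmware image used when loading the CP MEC.
 */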
1868 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1869 {
1870 	int r;
1871 	u32 *hpd;
1872 	const __le32 *fw_data;
1873 	unsigned fw_size;
1874 	u32 *fw;
1875 	size_t mec_hpd_size;
1876 
1877 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1878 
1879 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1880 
1881 	/* take ownership of the relevant compute queues */
1882 	amdgpu_gfx_compute_queue_acquire(adev);
1883 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1884 	if (mec_hpd_size) {
1885 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1886 					      AMDGPU_GEM_DOMAIN_VRAM |
1887 					      AMDGPU_GEM_DOMAIN_GTT,
1888 					      &adev->gfx.mec.hpd_eop_obj,
1889 					      &adev->gfx.mec.hpd_eop_gpu_addr,
1890 					      (void **)&hpd);
1891 		if (r) {
1892 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1893 			gfx_v9_0_mec_fini(adev);
1894 			return r;
1895 		}
1896 
1897 		memset(hpd, 0, mec_hpd_size);
1898 
1899 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1900 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1901 	}
1902 
1903 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1904 
1905 	fw_data = (const __le32 *)
1906 		(adev->gfx.mec_fw->data +
1907 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1908 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1909 
1910 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1911 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1912 				      &adev->gfx.mec.mec_fw_obj,
1913 				      &adev->gfx.mec.mec_fw_gpu_addr,
1914 				      (void **)&fw);
1915 	if (r) {
1916 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1917 		gfx_v9_0_mec_fini(adev);
1918 		return r;
1919 	}
1920 
1921 	memcpy(fw, fw_data, fw_size);
1922 
1923 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1924 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1925 
1926 	return 0;
1927 }
1928 
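/*
 * Read wave state through the SQ indirect interface: SQ_IND_INDEX
 * selects the wave/SIMD (and thread) plus a register index, and the
 * value is then read back via SQ_IND_DATA. wave_read_regs()
 * additionally sets AUTO_INCR to stream a consecutive register range.
 */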
1929 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1930 {
1931 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1932 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1933 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1934 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
1935 		(SQ_IND_INDEX__FORCE_READ_MASK));
1936 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1937 }
1938 
1939 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1940 			   uint32_t wave, uint32_t thread,
1941 			   uint32_t regno, uint32_t num, uint32_t *out)
1942 {
1943 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1944 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1945 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1946 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
1947 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1948 		(SQ_IND_INDEX__FORCE_READ_MASK) |
1949 		(SQ_IND_INDEX__AUTO_INCR_MASK));
1950 	while (num--)
1951 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1952 }
1953 
1954 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1955 {
1956 	/* type 1 wave data */
1957 	dst[(*no_fields)++] = 1;
1958 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1959 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1960 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1961 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1962 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1963 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1964 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1965 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1966 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1967 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1968 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1969 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1970 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1971 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1972 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
1973 }
1974 
1975 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1976 				     uint32_t wave, uint32_t start,
1977 				     uint32_t size, uint32_t *dst)
1978 {
1979 	wave_read_regs(
1980 		adev, simd, wave, 0,
1981 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1982 }
1983 
1984 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1985 				     uint32_t wave, uint32_t thread,
1986 				     uint32_t start, uint32_t size,
1987 				     uint32_t *dst)
1988 {
1989 	wave_read_regs(
1990 		adev, simd, wave, thread,
1991 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1992 }
1993 
1994 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1995 				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
1996 {
1997 	soc15_grbm_select(adev, me, pipe, q, vm, 0);
1998 }
1999 
2000 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2001 	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2002 	.select_se_sh = &gfx_v9_0_select_se_sh,
2003 	.read_wave_data = &gfx_v9_0_read_wave_data,
2004 	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2005 	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2006 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2007 	.get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
2008 };
2009 
2010 const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
2011 	.ras_error_inject = &gfx_v9_0_ras_error_inject,
2012 	.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2013 	.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2014 };
2015 
2016 static struct amdgpu_gfx_ras gfx_v9_0_ras = {
2017 	.ras_block = {
2018 		.hw_ops = &gfx_v9_0_ras_ops,
2019 	},
2020 };
2021 
2022 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2023 {
2024 	u32 gb_addr_config;
2025 	int err;
2026 
2027 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2028 	case IP_VERSION(9, 0, 1):
2029 		adev->gfx.config.max_hw_contexts = 8;
2030 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2031 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2032 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2033 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2034 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2035 		break;
2036 	case IP_VERSION(9, 2, 1):
2037 		adev->gfx.config.max_hw_contexts = 8;
2038 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2039 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2040 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2041 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2042 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2043 		DRM_INFO("fix gfx.config for vega12\n");
2044 		break;
2045 	case IP_VERSION(9, 4, 0):
2046 		adev->gfx.ras = &gfx_v9_0_ras;
2047 		adev->gfx.config.max_hw_contexts = 8;
2048 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2049 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2050 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2051 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2052 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2053 		gb_addr_config &= ~0xf3e777ff;
2054 		gb_addr_config |= 0x22014042;
2055 		/* check vbios table if gpu info is not available */
2056 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2057 		if (err)
2058 			return err;
2059 		break;
2060 	case IP_VERSION(9, 2, 2):
2061 	case IP_VERSION(9, 1, 0):
2062 		adev->gfx.config.max_hw_contexts = 8;
2063 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2064 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2065 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2066 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2067 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2068 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2069 		else
2070 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2071 		break;
2072 	case IP_VERSION(9, 4, 1):
2073 		adev->gfx.ras = &gfx_v9_4_ras;
2074 		adev->gfx.config.max_hw_contexts = 8;
2075 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2076 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2077 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2078 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2079 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2080 		gb_addr_config &= ~0xf3e777ff;
2081 		gb_addr_config |= 0x22014042;
2082 		break;
2083 	case IP_VERSION(9, 3, 0):
2084 		adev->gfx.config.max_hw_contexts = 8;
2085 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2086 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2087 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2088 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2089 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2090 		gb_addr_config &= ~0xf3e777ff;
2091 		gb_addr_config |= 0x22010042;
2092 		break;
2093 	case IP_VERSION(9, 4, 2):
2094 		adev->gfx.ras = &gfx_v9_4_2_ras;
2095 		adev->gfx.config.max_hw_contexts = 8;
2096 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2097 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2098 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2099 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2100 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2101 		gb_addr_config &= ~0xf3e777ff;
2102 		gb_addr_config |= 0x22014042;
2103 		/* check vbios table if gpu info is not available */
2104 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2105 		if (err)
2106 			return err;
2107 		break;
2108 	default:
2109 		BUG();
2110 		break;
2111 	}
2112 
2113 	adev->gfx.config.gb_addr_config = gb_addr_config;
2114 
2115 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2116 			REG_GET_FIELD(
2117 					adev->gfx.config.gb_addr_config,
2118 					GB_ADDR_CONFIG,
2119 					NUM_PIPES);
2120 
2121 	adev->gfx.config.max_tile_pipes =
2122 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2123 
2124 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2125 			REG_GET_FIELD(
2126 					adev->gfx.config.gb_addr_config,
2127 					GB_ADDR_CONFIG,
2128 					NUM_BANKS);
2129 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2130 			REG_GET_FIELD(
2131 					adev->gfx.config.gb_addr_config,
2132 					GB_ADDR_CONFIG,
2133 					MAX_COMPRESSED_FRAGS);
2134 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2135 			REG_GET_FIELD(
2136 					adev->gfx.config.gb_addr_config,
2137 					GB_ADDR_CONFIG,
2138 					NUM_RB_PER_SE);
2139 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2140 			REG_GET_FIELD(
2141 					adev->gfx.config.gb_addr_config,
2142 					GB_ADDR_CONFIG,
2143 					NUM_SHADER_ENGINES);
2144 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2145 			REG_GET_FIELD(
2146 					adev->gfx.config.gb_addr_config,
2147 					GB_ADDR_CONFIG,
2148 					PIPE_INTERLEAVE_SIZE));
2149 
2150 	return 0;
2151 }
2152 
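/*
 * Set up one compute ring on the given MEC/pipe/queue (MEC0 appears as
 * ME1 to the CP), assign its doorbell and HPD EOP slot, derive the EOP
 * interrupt line from the pipe, and register it with the appropriate
 * hardware priority.
 */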
2153 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2154 				      int mec, int pipe, int queue)
2155 {
2156 	unsigned irq_type;
2157 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2158 	unsigned int hw_prio;
2159 
2162 	/* mec0 is me1 */
2163 	ring->me = mec + 1;
2164 	ring->pipe = pipe;
2165 	ring->queue = queue;
2166 
2167 	ring->ring_obj = NULL;
2168 	ring->use_doorbell = true;
2169 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2170 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2171 				+ (ring_id * GFX9_MEC_HPD_SIZE);
2172 	ring->vm_hub = AMDGPU_GFXHUB(0);
2173 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2174 
2175 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2176 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2177 		+ ring->pipe;
2178 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2179 			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
2180 	/* type-2 packets are deprecated on MEC, use type-3 instead */
2181 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2182 				hw_prio, NULL);
2183 }
2184 
2185 static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
2186 {
2187 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
2188 	uint32_t *ptr;
2189 	uint32_t inst;
2190 
2191 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
2192 	if (!ptr) {
2193 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
2194 		adev->gfx.ip_dump_core = NULL;
2195 	} else {
2196 		adev->gfx.ip_dump_core = ptr;
2197 	}
2198 
2199 	/* Allocate memory for compute queue registers for all the instances */
2200 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
2201 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
2202 		adev->gfx.mec.num_queue_per_pipe;
2203 
2204 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
2205 	if (!ptr) {
2206 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
2207 		adev->gfx.ip_dump_compute_queues = NULL;
2208 	} else {
2209 		adev->gfx.ip_dump_compute_queues = ptr;
2210 	}
2211 }
2212 
2213 static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
2214 {
2215 	int i, j, k, r, ring_id;
2216 	int xcc_id = 0;
2217 	struct amdgpu_ring *ring;
2218 	struct amdgpu_device *adev = ip_block->adev;
2219 	unsigned int hw_prio;
2220 
2221 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2222 	case IP_VERSION(9, 0, 1):
2223 	case IP_VERSION(9, 2, 1):
2224 	case IP_VERSION(9, 4, 0):
2225 	case IP_VERSION(9, 2, 2):
2226 	case IP_VERSION(9, 1, 0):
2227 	case IP_VERSION(9, 4, 1):
2228 	case IP_VERSION(9, 3, 0):
2229 	case IP_VERSION(9, 4, 2):
2230 		adev->gfx.mec.num_mec = 2;
2231 		break;
2232 	default:
2233 		adev->gfx.mec.num_mec = 1;
2234 		break;
2235 	}
2236 
2237 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2238 	case IP_VERSION(9, 0, 1):
2239 	case IP_VERSION(9, 2, 1):
2240 	case IP_VERSION(9, 4, 0):
2241 	case IP_VERSION(9, 2, 2):
2242 	case IP_VERSION(9, 1, 0):
2243 	case IP_VERSION(9, 3, 0):
2244 		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2245 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2246 		if (adev->gfx.me_fw_version  >= 167 &&
2247 		    adev->gfx.pfp_fw_version >= 196 &&
2248 		    adev->gfx.mec_fw_version >= 474) {
2249 			adev->gfx.enable_cleaner_shader = true;
2250 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2251 			if (r) {
2252 				adev->gfx.enable_cleaner_shader = false;
2253 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2254 			}
2255 		}
2256 		break;
2257 	case IP_VERSION(9, 4, 2):
2258 		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2259 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2260 		if (adev->gfx.mec_fw_version >= 88) {
2261 			adev->gfx.enable_cleaner_shader = true;
2262 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2263 			if (r) {
2264 				adev->gfx.enable_cleaner_shader = false;
2265 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2266 			}
2267 		}
2268 		break;
2269 	default:
2270 		adev->gfx.enable_cleaner_shader = false;
2271 		break;
2272 	}
2273 
2274 	adev->gfx.mec.num_pipe_per_mec = 4;
2275 	adev->gfx.mec.num_queue_per_pipe = 8;
2276 
2277 	/* EOP Event */
2278 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2279 	if (r)
2280 		return r;
2281 
2282 	/* Bad opcode Event */
2283 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
2284 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
2285 			      &adev->gfx.bad_op_irq);
2286 	if (r)
2287 		return r;
2288 
2289 	/* Privileged reg */
2290 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2291 			      &adev->gfx.priv_reg_irq);
2292 	if (r)
2293 		return r;
2294 
2295 	/* Privileged inst */
2296 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2297 			      &adev->gfx.priv_inst_irq);
2298 	if (r)
2299 		return r;
2300 
2301 	/* ECC error */
2302 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2303 			      &adev->gfx.cp_ecc_error_irq);
2304 	if (r)
2305 		return r;
2306 
2307 	/* FUE error */
2308 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2309 			      &adev->gfx.cp_ecc_error_irq);
2310 	if (r)
2311 		return r;
2312 
2313 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2314 
2315 	if (adev->gfx.rlc.funcs) {
2316 		if (adev->gfx.rlc.funcs->init) {
2317 			r = adev->gfx.rlc.funcs->init(adev);
2318 			if (r) {
2319 				dev_err(adev->dev, "Failed to init rlc BOs!\n");
2320 				return r;
2321 			}
2322 		}
2323 	}
2324 
2325 	r = gfx_v9_0_mec_init(adev);
2326 	if (r) {
2327 		DRM_ERROR("Failed to init MEC BOs!\n");
2328 		return r;
2329 	}
2330 
2331 	/* set up the gfx ring */
2332 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2333 		ring = &adev->gfx.gfx_ring[i];
2334 		ring->ring_obj = NULL;
2335 		if (!i)
2336 			sprintf(ring->name, "gfx");
2337 		else
2338 			sprintf(ring->name, "gfx_%d", i);
2339 		ring->use_doorbell = true;
2340 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2341 
2342 		/* disable scheduler on the real ring */
2343 		ring->no_scheduler = adev->gfx.mcbp;
2344 		ring->vm_hub = AMDGPU_GFXHUB(0);
2345 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2346 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2347 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
2348 		if (r)
2349 			return r;
2350 	}
2351 
2352 	/* set up the software rings */
2353 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2354 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2355 			ring = &adev->gfx.sw_gfx_ring[i];
2356 			ring->ring_obj = NULL;
2357 			sprintf(ring->name, "%s", amdgpu_sw_ring_name(i));
2358 			ring->use_doorbell = true;
2359 			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2360 			ring->is_sw_ring = true;
2361 			hw_prio = amdgpu_sw_ring_priority(i);
2362 			ring->vm_hub = AMDGPU_GFXHUB(0);
2363 			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2364 					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
2365 					     NULL);
2366 			if (r)
2367 				return r;
2368 			ring->wptr = 0;
2369 		}
2370 
2371 		/* init the muxer and add software rings */
2372 		r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
2373 					 GFX9_NUM_SW_GFX_RINGS);
2374 		if (r) {
2375 			DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
2376 			return r;
2377 		}
2378 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2379 			r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
2380 							&adev->gfx.sw_gfx_ring[i]);
2381 			if (r) {
2382 				DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
2383 				return r;
2384 			}
2385 		}
2386 	}
2387 
2388 	/* set up the compute queues - allocate horizontally across pipes */
2389 	ring_id = 0;
2390 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2391 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2392 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2393 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2394 								     k, j))
2395 					continue;
2396 
2397 				r = gfx_v9_0_compute_ring_init(adev,
2398 							       ring_id,
2399 							       i, k, j);
2400 				if (r)
2401 					return r;
2402 
2403 				ring_id++;
2404 			}
2405 		}
2406 	}
2407 
2408 	/* TODO: Add queue reset mask when FW fully supports it */
2409 	adev->gfx.gfx_supported_reset =
2410 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
2411 	adev->gfx.compute_supported_reset =
2412 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
2413 	if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
2414 		adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2415 
2416 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
2417 	if (r) {
2418 		DRM_ERROR("Failed to init KIQ BOs!\n");
2419 		return r;
2420 	}
2421 
2422 	r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2423 	if (r)
2424 		return r;
2425 
2426 	/* create MQD for all compute queues as well as KIQ for the SRIOV case */
2427 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
2428 	if (r)
2429 		return r;
2430 
2431 	adev->gfx.ce_ram_size = 0x8000;
2432 
2433 	r = gfx_v9_0_gpu_early_init(adev);
2434 	if (r)
2435 		return r;
2436 
2437 	if (amdgpu_gfx_ras_sw_init(adev)) {
2438 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
2439 		return -EINVAL;
2440 	}
2441 
2442 	gfx_v9_0_alloc_ip_dump(adev);
2443 
2444 	r = amdgpu_gfx_sysfs_init(adev);
2445 	if (r)
2446 		return r;
2447 
2448 	return 0;
2449 }
2450 
2452 static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2453 {
2454 	int i;
2455 	struct amdgpu_device *adev = ip_block->adev;
2456 
2457 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2458 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
2459 			amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
2460 		amdgpu_ring_mux_fini(&adev->gfx.muxer);
2461 	}
2462 
2463 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2464 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2465 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2466 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2467 
2468 	amdgpu_gfx_mqd_sw_fini(adev, 0);
2469 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2470 	amdgpu_gfx_kiq_fini(adev, 0);
2471 
2472 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
2473 
2474 	gfx_v9_0_mec_fini(adev);
2475 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2476 				&adev->gfx.rlc.clear_state_gpu_addr,
2477 				(void **)&adev->gfx.rlc.cs_ptr);
2478 	if (adev->flags & AMD_IS_APU) {
2479 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2480 				&adev->gfx.rlc.cp_table_gpu_addr,
2481 				(void **)&adev->gfx.rlc.cp_table_ptr);
2482 	}
2483 	gfx_v9_0_free_microcode(adev);
2484 
2485 	amdgpu_gfx_sysfs_fini(adev);
2486 
2487 	kfree(adev->gfx.ip_dump_core);
2488 	kfree(adev->gfx.ip_dump_compute_queues);
2489 
2490 	return 0;
2491 }
2492 
2494 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2495 {
2496 	/* TODO */
2497 }
2498 
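/*
 * Steer register access to a specific shader engine / shader array /
 * instance via GRBM_GFX_INDEX; 0xffffffff at any level selects
 * broadcast writes instead. Callers serialize on grbm_idx_mutex.
 */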
2499 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2500 			   u32 instance, int xcc_id)
2501 {
2502 	u32 data;
2503 
2504 	if (instance == 0xffffffff)
2505 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2506 	else
2507 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2508 
2509 	if (se_num == 0xffffffff)
2510 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2511 	else
2512 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2513 
2514 	if (sh_num == 0xffffffff)
2515 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2516 	else
2517 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2518 
2519 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2520 }
2521 
2522 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2523 {
2524 	u32 data, mask;
2525 
2526 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2527 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2528 
2529 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2530 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2531 
2532 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2533 					 adev->gfx.config.max_sh_per_se);
2534 
2535 	return (~data) & mask;
2536 }
2537 
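/*
 * Probe every SE/SH for its active render backends and accumulate the
 * per-SH bitmaps into the global backend_enable_mask and num_rbs count.
 */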
2538 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2539 {
2540 	int i, j;
2541 	u32 data;
2542 	u32 active_rbs = 0;
2543 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2544 					adev->gfx.config.max_sh_per_se;
2545 
2546 	mutex_lock(&adev->grbm_idx_mutex);
2547 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2548 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2549 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2550 			data = gfx_v9_0_get_rb_active_bitmap(adev);
2551 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2552 					       rb_bitmap_width_per_sh);
2553 		}
2554 	}
2555 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2556 	mutex_unlock(&adev->grbm_idx_mutex);
2557 
2558 	adev->gfx.config.backend_enable_mask = active_rbs;
2559 	adev->gfx.config.num_rbs = hweight32(active_rbs);
2560 }
2561 
2562 static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
2563 				uint32_t first_vmid,
2564 				uint32_t last_vmid)
2565 {
2566 	uint32_t data;
2567 	uint32_t trap_config_vmid_mask = 0;
2568 	int i;
2569 
2570 	/* Calculate trap config vmid mask */
2571 	for (i = first_vmid; i < last_vmid; i++)
2572 		trap_config_vmid_mask |= (1 << i);
2573 
2574 	data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
2575 			VMID_SEL, trap_config_vmid_mask);
2576 	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
2577 			TRAP_EN, 1);
2578 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
2579 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
2580 
2581 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
2582 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
2583 }
2584 
2585 #define DEFAULT_SH_MEM_BASES	(0x6000)
2586 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2587 {
2588 	int i;
2589 	uint32_t sh_mem_config;
2590 	uint32_t sh_mem_bases;
2591 
2592 	/*
2593 	 * Configure apertures:
2594 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2595 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2596 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2597 	 */
2598 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2599 
2600 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2601 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2602 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2603 
2604 	mutex_lock(&adev->srbm_mutex);
2605 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2606 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2607 		/* CP and shaders */
2608 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2609 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2610 	}
2611 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2612 	mutex_unlock(&adev->srbm_mutex);
2613 
2614 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
2615 	 * access. These should be enabled by FW for target VMIDs. */
2616 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2617 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2618 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2619 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2620 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2621 	}
2622 }
2623 
2624 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2625 {
2626 	int vmid;
2627 
2628 	/*
2629 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2630 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2631 	 * the driver can enable them for graphics. VMID0 should maintain
2632 	 * access so that HWS firmware can save/restore entries.
2633 	 */
2634 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2635 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2636 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2637 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2638 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2639 	}
2640 }
2641 
2642 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2643 {
2644 	uint32_t tmp;
2645 
2646 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2647 	case IP_VERSION(9, 4, 1):
2648 		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2649 		tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
2650 				!READ_ONCE(adev->barrier_has_auto_waitcnt));
2651 		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2652 		break;
2653 	case IP_VERSION(9, 4, 2):
2654 		gfx_v9_4_2_init_sq(adev);
2655 		break;
2656 	default:
2657 		break;
2658 	}
2659 }
2660 
2661 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2662 {
2663 	u32 tmp;
2664 	int i;
2665 
2666 	if (!amdgpu_sriov_vf(adev) ||
2667 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
2668 		WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2669 	}
2670 
2671 	gfx_v9_0_tiling_mode_table_init(adev);
2672 
2673 	if (adev->gfx.num_gfx_rings)
2674 		gfx_v9_0_setup_rb(adev);
2675 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2676 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2677 
2678 	/* XXX SH_MEM regs */
2679 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2680 	mutex_lock(&adev->srbm_mutex);
2681 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2682 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2683 		/* CP and shaders */
2684 		if (i == 0) {
2685 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2686 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2687 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2688 					    !!adev->gmc.noretry);
2689 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2690 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2691 		} else {
2692 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2693 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2694 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2695 					    !!adev->gmc.noretry);
2696 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2697 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2698 				(adev->gmc.private_aperture_start >> 48));
2699 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2700 				(adev->gmc.shared_aperture_start >> 48));
2701 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2702 		}
2703 	}
2704 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2705 
2706 	mutex_unlock(&adev->srbm_mutex);
2707 
2708 	gfx_v9_0_init_compute_vmid(adev);
2709 	gfx_v9_0_init_gds_vmid(adev);
2710 	gfx_v9_0_init_sq_config(adev);
2711 }
2712 
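/*
 * Wait for the RLC serdes to go idle: poll the per-CU master busy bits
 * on each SE/SH, then the non-CU (SE/GC/TC) master busy bits, each for
 * up to usec_timeout microseconds.
 */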
2713 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2714 {
2715 	u32 i, j, k;
2716 	u32 mask;
2717 
2718 	mutex_lock(&adev->grbm_idx_mutex);
2719 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2720 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2721 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2722 			for (k = 0; k < adev->usec_timeout; k++) {
2723 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2724 					break;
2725 				udelay(1);
2726 			}
2727 			if (k == adev->usec_timeout) {
2728 				amdgpu_gfx_select_se_sh(adev, 0xffffffff,
2729 						      0xffffffff, 0xffffffff, 0);
2730 				mutex_unlock(&adev->grbm_idx_mutex);
2731 				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
2732 					 i, j);
2733 				return;
2734 			}
2735 		}
2736 	}
2737 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2738 	mutex_unlock(&adev->grbm_idx_mutex);
2739 
2740 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2741 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2742 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2743 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2744 	for (k = 0; k < adev->usec_timeout; k++) {
2745 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2746 			break;
2747 		udelay(1);
2748 	}
2749 }
2750 
2751 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2752 					       bool enable)
2753 {
2754 	u32 tmp;
2755 
2756 	/* These interrupts should be enabled to drive DS clock */
2757 
2758 	tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2759 
2760 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2761 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2762 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2763 	if (adev->gfx.num_gfx_rings)
2764 		tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2765 
2766 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2767 }
2768 
2769 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2770 {
2771 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2772 	/* csib */
2773 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2774 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2775 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2776 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2777 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2778 			adev->gfx.rlc.clear_state_size);
2779 }
2780 
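/*
 * Layout of the indirect part of register_list_format, as parsed below:
 * a series of blocks, each block being a run of three-dword records
 * (two header dwords followed by an indirect register offset) and ending
 * with a 0xFFFFFFFF marker dword:
 *
 *   [hdr0, hdr1, reg] [hdr0, hdr1, reg] ... 0xFFFFFFFF [hdr0, ...] ...
 *
 * The start offset of every block is recorded in indirect_start_offsets
 * and each distinct register offset is collected in unique_indirect_regs.
 */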
2781 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2782 				int indirect_offset,
2783 				int list_size,
2784 				int *unique_indirect_regs,
2785 				int unique_indirect_reg_count,
2786 				int *indirect_start_offsets,
2787 				int *indirect_start_offsets_count,
2788 				int max_start_offsets_count)
2789 {
2790 	int idx;
2791 
2792 	for (; indirect_offset < list_size; indirect_offset++) {
2793 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2794 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2795 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2796 
2797 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2798 			indirect_offset += 2;
2799 
2800 			/* look for a matching index */
2801 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2802 				if (unique_indirect_regs[idx] ==
2803 					register_list_format[indirect_offset] ||
2804 					!unique_indirect_regs[idx])
2805 					break;
2806 			}
2807 
2808 			BUG_ON(idx >= unique_indirect_reg_count);
2809 
2810 			if (!unique_indirect_regs[idx])
2811 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2812 
2813 			indirect_offset++;
2814 		}
2815 	}
2816 }
2817 
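/*
 * Program the RLC save/restore machine: upload the register-restore
 * table into the SRM ARAM, write the direct and indirect register list
 * plus its size and block start offsets into the GPM scratch RAM, and
 * map each unique indirect register through the paired
 * RLC_SRM_INDEX_CNTL_ADDR_n/RLC_SRM_INDEX_CNTL_DATA_n registers.
 */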
2818 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2819 {
2820 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2821 	int unique_indirect_reg_count = 0;
2822 
2823 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2824 	int indirect_start_offsets_count = 0;
2825 
2826 	int list_size = 0;
2827 	int i = 0, j = 0;
2828 	u32 tmp = 0;
2829 
2830 	u32 *register_list_format =
2831 		kmemdup(adev->gfx.rlc.register_list_format,
2832 			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2833 	if (!register_list_format)
2834 		return -ENOMEM;
2835 
2836 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2837 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2838 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2839 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2840 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2841 				    unique_indirect_regs,
2842 				    unique_indirect_reg_count,
2843 				    indirect_start_offsets,
2844 				    &indirect_start_offsets_count,
2845 				    ARRAY_SIZE(indirect_start_offsets));
2846 
2847 	/* enable auto inc in case it is disabled */
2848 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2849 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2850 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2851 
2852 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2853 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2854 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2855 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2856 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2857 			adev->gfx.rlc.register_restore[i]);
2858 
2859 	/* load indirect register */
2860 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2861 		adev->gfx.rlc.reg_list_format_start);
2862 
2863 	/* direct register portion */
2864 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2865 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2866 			register_list_format[i]);
2867 
2868 	/* indirect register portion */
2869 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2870 		if (register_list_format[i] == 0xFFFFFFFF) {
2871 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2872 			continue;
2873 		}
2874 
2875 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2876 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2877 
2878 		for (j = 0; j < unique_indirect_reg_count; j++) {
2879 			if (register_list_format[i] == unique_indirect_regs[j]) {
2880 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2881 				break;
2882 			}
2883 		}
2884 
2885 		BUG_ON(j >= unique_indirect_reg_count);
2886 
2887 		i++;
2888 	}
2889 
2890 	/* set save/restore list size */
2891 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2892 	list_size = list_size >> 1;
2893 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2894 		adev->gfx.rlc.reg_restore_list_size);
2895 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2896 
2897 	/* write the starting offsets to RLC scratch ram */
2898 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2899 		adev->gfx.rlc.starting_offsets_start);
2900 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2901 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2902 		       indirect_start_offsets[i]);
2903 
2904 	/* load unique indirect regs */
2905 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2906 		if (unique_indirect_regs[i] != 0) {
2907 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2908 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2909 			       unique_indirect_regs[i] & 0x3FFFF);
2910 
2911 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2912 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2913 			       unique_indirect_regs[i] >> 20);
2914 		}
2915 	}
2916 
2917 	kfree(register_list_format);
2918 	return 0;
2919 }
2920 
2921 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2922 {
2923 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2924 }
2925 
2926 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2927 					     bool enable)
2928 {
2929 	uint32_t data = 0;
2930 	uint32_t default_data = 0;
2931 
2932 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2933 	if (enable) {
2934 		/* enable GFXIP control over CGPG */
2935 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2936 		if (default_data != data)
2937 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2938 
2939 		/* update status */
2940 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2941 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2942 		if (default_data != data)
2943 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2944 	} else {
2945 		/* restore GFXIP control over CGPG */
2946 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2947 		if (default_data != data)
2948 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2949 	}
2950 }
2951 
2952 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2953 {
2954 	uint32_t data = 0;
2955 
2956 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2957 			      AMD_PG_SUPPORT_GFX_SMG |
2958 			      AMD_PG_SUPPORT_GFX_DMG)) {
2959 		/* init IDLE_POLL_COUNT = 60 */
2960 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2961 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2962 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2963 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2964 
2965 		/* init RLC PG Delay */
2966 		data = 0;
2967 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2968 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2969 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2970 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2971 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2972 
2973 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2974 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2975 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2976 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2977 
2978 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2979 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2980 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2981 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2982 
2983 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2984 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2985 
2986 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2987 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2988 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2989 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))
2990 			pwr_10_0_gfxip_control_over_cgpg(adev, true);
2991 	}
2992 }
2993 
2994 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2995 						bool enable)
2996 {
2997 	uint32_t data = 0;
2998 	uint32_t default_data = 0;
2999 
3000 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3001 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3002 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
3003 			     enable ? 1 : 0);
3004 	if (default_data != data)
3005 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3006 }
3007 
3008 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
3009 						bool enable)
3010 {
3011 	uint32_t data = 0;
3012 	uint32_t default_data = 0;
3013 
3014 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3015 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3016 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
3017 			     enable ? 1 : 0);
3018 	if (default_data != data)
3019 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3020 }
3021 
3022 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
3023 					bool enable)
3024 {
3025 	uint32_t data = 0;
3026 	uint32_t default_data = 0;
3027 
3028 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3029 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3030 			     CP_PG_DISABLE,
3031 			     enable ? 0 : 1);
3032 	if (default_data != data)
3033 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3034 }
3035 
3036 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
3037 						bool enable)
3038 {
3039 	uint32_t data, default_data;
3040 
3041 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3042 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3043 			     GFX_POWER_GATING_ENABLE,
3044 			     enable ? 1 : 0);
3045 	if (default_data != data)
3046 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3047 }
3048 
3049 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
3050 						bool enable)
3051 {
3052 	uint32_t data, default_data;
3053 
3054 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3055 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3056 			     GFX_PIPELINE_PG_ENABLE,
3057 			     enable ? 1 : 0);
3058 	if (default_data != data)
3059 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3060 
3061 	if (!enable)
3062 		/* read any GFX register to wake up GFX */
3063 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3064 }
3065 
3066 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3067 						       bool enable)
3068 {
3069 	uint32_t data, default_data;
3070 
3071 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3072 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3073 			     STATIC_PER_CU_PG_ENABLE,
3074 			     enable ? 1 : 0);
3075 	if (default_data != data)
3076 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3077 }
3078 
3079 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3080 						bool enable)
3081 {
3082 	uint32_t data, default_data;
3083 
3084 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3085 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3086 			     DYN_PER_CU_PG_ENABLE,
3087 			     enable ? 1 : 0);
3088 	if (default_data != data)
3089 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3090 }
3091 
3092 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3093 {
3094 	gfx_v9_0_init_csb(adev);
3095 
3096 	/*
3097 	 * The RLC save/restore list is supported from RLC v2_1 onward,
3098 	 * and it is required by the gfxoff feature.
3099 	 */
3100 	if (adev->gfx.rlc.is_rlc_v2_1) {
3101 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3102 			    IP_VERSION(9, 2, 1) ||
3103 		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
3104 			gfx_v9_1_init_rlc_save_restore_list(adev);
3105 		gfx_v9_0_enable_save_restore_machine(adev);
3106 	}
3107 
3108 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3109 			      AMD_PG_SUPPORT_GFX_SMG |
3110 			      AMD_PG_SUPPORT_GFX_DMG |
3111 			      AMD_PG_SUPPORT_CP |
3112 			      AMD_PG_SUPPORT_GDS |
3113 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3114 		WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
3115 			     adev->gfx.rlc.cp_table_gpu_addr >> 8);
3116 		gfx_v9_0_init_gfx_power_gating(adev);
3117 	}
3118 }
3119 
3120 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3121 {
3122 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3123 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3124 	gfx_v9_0_wait_for_rlc_serdes(adev);
3125 }
3126 
3127 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3128 {
3129 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3130 	udelay(50);
3131 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3132 	udelay(50);
3133 }
3134 
3135 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3136 {
3137 #ifdef AMDGPU_RLC_DEBUG_RETRY
3138 	u32 rlc_ucode_ver;
3139 #endif
3140 
3141 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3142 	udelay(50);
3143 
3144 	/* APUs (e.g. Carrizo) enable the CP interrupt after the CP is initialized */
3145 	if (!(adev->flags & AMD_IS_APU)) {
3146 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3147 		udelay(50);
3148 	}
3149 
3150 #ifdef AMDGPU_RLC_DEBUG_RETRY
3151 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
3152 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3153 	if (rlc_ucode_ver == 0x108) {
3154 		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3155 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
3156 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3157 		 * default is 0x9C4 to create a 100us interval */
3158 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3159 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3160 		 * to disable the page fault retry interrupts, default is
3161 		 * 0x100 (256) */
3162 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3163 	}
3164 #endif
3165 }
3166 
3167 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3168 {
3169 	const struct rlc_firmware_header_v2_0 *hdr;
3170 	const __le32 *fw_data;
3171 	unsigned i, fw_size;
3172 
3173 	if (!adev->gfx.rlc_fw)
3174 		return -EINVAL;
3175 
3176 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3177 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3178 
3179 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3180 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3181 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3182 
3183 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3184 			RLCG_UCODE_LOADING_START_ADDRESS);
3185 	for (i = 0; i < fw_size; i++)
3186 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3187 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3188 
3189 	return 0;
3190 }
3191 
3192 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3193 {
3194 	int r;
3195 
3196 	if (amdgpu_sriov_vf(adev)) {
3197 		gfx_v9_0_init_csb(adev);
3198 		return 0;
3199 	}
3200 
3201 	adev->gfx.rlc.funcs->stop(adev);
3202 
3203 	/* disable CG */
3204 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3205 
3206 	gfx_v9_0_init_pg(adev);
3207 
3208 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3209 		/* legacy rlc firmware loading */
3210 		r = gfx_v9_0_rlc_load_microcode(adev);
3211 		if (r)
3212 			return r;
3213 	}
3214 
3215 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3216 	case IP_VERSION(9, 2, 2):
3217 	case IP_VERSION(9, 1, 0):
3218 		gfx_v9_0_init_lbpw(adev);
3219 		if (amdgpu_lbpw == 0)
3220 			gfx_v9_0_enable_lbpw(adev, false);
3221 		else
3222 			gfx_v9_0_enable_lbpw(adev, true);
3223 		break;
3224 	case IP_VERSION(9, 4, 0):
3225 		gfx_v9_4_init_lbpw(adev);
3226 		if (amdgpu_lbpw > 0)
3227 			gfx_v9_0_enable_lbpw(adev, true);
3228 		else
3229 			gfx_v9_0_enable_lbpw(adev, false);
3230 		break;
3231 	default:
3232 		break;
3233 	}
3234 
3235 	gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
3236 
3237 	adev->gfx.rlc.funcs->start(adev);
3238 
3239 	return 0;
3240 }
3241 
3242 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3243 {
3244 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3245 
3246 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
3247 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
3248 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
3249 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
3250 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
3251 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
3252 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
3253 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
3254 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
3255 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3256 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3257 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3258 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3259 	udelay(50);
3260 }
3261 
3262 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3263 {
3264 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3265 	const struct gfx_firmware_header_v1_0 *ce_hdr;
3266 	const struct gfx_firmware_header_v1_0 *me_hdr;
3267 	const __le32 *fw_data;
3268 	unsigned i, fw_size;
3269 
3270 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3271 		return -EINVAL;
3272 
3273 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3274 		adev->gfx.pfp_fw->data;
3275 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3276 		adev->gfx.ce_fw->data;
3277 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3278 		adev->gfx.me_fw->data;
3279 
3280 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3281 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3282 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3283 
3284 	gfx_v9_0_cp_gfx_enable(adev, false);
3285 
3286 	/* PFP */
3287 	fw_data = (const __le32 *)
3288 		(adev->gfx.pfp_fw->data +
3289 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3290 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3291 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3292 	for (i = 0; i < fw_size; i++)
3293 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3294 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3295 
3296 	/* CE */
3297 	fw_data = (const __le32 *)
3298 		(adev->gfx.ce_fw->data +
3299 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3300 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3301 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3302 	for (i = 0; i < fw_size; i++)
3303 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3304 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3305 
3306 	/* ME */
3307 	fw_data = (const __le32 *)
3308 		(adev->gfx.me_fw->data +
3309 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3310 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3311 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3312 	for (i = 0; i < fw_size; i++)
3313 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3314 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3315 
3316 	return 0;
3317 }
3318 
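/*
 * Bring up the gfx CP and submit the initial clear-state sequence:
 * preamble begin/end markers around the golden context registers from
 * gfx9_cs_data, a CLEAR_STATE packet, the CE partition bases and the
 * VGT index type. On gfx9 APUs resuming from S3 without firmware
 * involvement, the CSB resubmission is skipped.
 */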
3319 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3320 {
3321 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3322 	const struct cs_section_def *sect = NULL;
3323 	const struct cs_extent_def *ext = NULL;
3324 	int r, i, tmp;
3325 
3326 	/* init the CP */
3327 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3328 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3329 
3330 	gfx_v9_0_cp_gfx_enable(adev, true);
3331 
3332 	/* Limit this quirk to the gfx9 APU series; the gfx10/gfx11 APUs
3333 	 * have been confirmed not to need it.
3334 	 */
3335 	if (adev->flags & AMD_IS_APU &&
3336 			adev->in_s3 && !pm_resume_via_firmware()) {
3337 		DRM_INFO("Will skip the CSB packet resubmit\n");
3338 		return 0;
3339 	}
3340 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3341 	if (r) {
3342 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3343 		return r;
3344 	}
3345 
3346 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3347 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3348 
3349 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3350 	amdgpu_ring_write(ring, 0x80000000);
3351 	amdgpu_ring_write(ring, 0x80000000);
3352 
3353 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3354 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3355 			if (sect->id == SECT_CONTEXT) {
3356 				amdgpu_ring_write(ring,
3357 				       PACKET3(PACKET3_SET_CONTEXT_REG,
3358 					       ext->reg_count));
3359 				amdgpu_ring_write(ring,
3360 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3361 				for (i = 0; i < ext->reg_count; i++)
3362 					amdgpu_ring_write(ring, ext->extent[i]);
3363 			}
3364 		}
3365 	}
3366 
3367 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3368 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3369 
3370 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3371 	amdgpu_ring_write(ring, 0);
3372 
3373 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3374 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3375 	amdgpu_ring_write(ring, 0x8000);
3376 	amdgpu_ring_write(ring, 0x8000);
3377 
3378 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3379 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3380 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3381 	amdgpu_ring_write(ring, tmp);
3382 	amdgpu_ring_write(ring, 0);
3383 
3384 	amdgpu_ring_commit(ring);
3385 
3386 	return 0;
3387 }
3388 
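/*
 * Program ring buffer 0 of the gfx CP: buffer and block size, write
 * pointer, rptr write-back address, wptr polling address, ring base and
 * the doorbell window, then kick the ring off via gfx_v9_0_cp_gfx_start().
 */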
3389 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3390 {
3391 	struct amdgpu_ring *ring;
3392 	u32 tmp;
3393 	u32 rb_bufsz;
3394 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3395 
3396 	/* Set the write pointer delay */
3397 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3398 
3399 	/* set the RB to use vmid 0 */
3400 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3401 
3402 	/* Set ring buffer size */
3403 	ring = &adev->gfx.gfx_ring[0];
3404 	rb_bufsz = order_base_2(ring->ring_size / 8);
3405 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3406 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3407 #ifdef __BIG_ENDIAN
3408 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3409 #endif
3410 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3411 
3412 	/* Initialize the ring buffer's write pointers */
3413 	ring->wptr = 0;
3414 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3415 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3416 
3417 	/* set the wb address whether it's enabled or not */
3418 	rptr_addr = ring->rptr_gpu_addr;
3419 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3420 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3421 
3422 	wptr_gpu_addr = ring->wptr_gpu_addr;
3423 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3424 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3425 
3426 	mdelay(1);
3427 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3428 
3429 	rb_addr = ring->gpu_addr >> 8;
3430 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3431 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3432 
3433 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3434 	if (ring->use_doorbell) {
3435 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3436 				    DOORBELL_OFFSET, ring->doorbell_index);
3437 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3438 				    DOORBELL_EN, 1);
3439 	} else {
3440 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3441 	}
3442 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3443 
3444 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3445 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
3446 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3447 
3448 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3449 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3450 
3451 
3452 	/* start the ring */
3453 	gfx_v9_0_cp_gfx_start(adev);
3454 
3455 	return 0;
3456 }
3457 
3458 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3459 {
3460 	if (enable) {
3461 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3462 	} else {
3463 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3464 				 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
3465 				  CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
3466 				  CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
3467 				  CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
3468 				  CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
3469 				  CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
3470 				  CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
3471 				  CP_MEC_CNTL__MEC_ME1_HALT_MASK |
3472 				  CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3473 		adev->gfx.kiq[0].ring.sched.ready = false;
3474 	}
3475 	udelay(50);
3476 }
3477 
3478 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3479 {
3480 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3481 	const __le32 *fw_data;
3482 	unsigned i;
3483 	u32 tmp;
3484 
3485 	if (!adev->gfx.mec_fw)
3486 		return -EINVAL;
3487 
3488 	gfx_v9_0_cp_compute_enable(adev, false);
3489 
3490 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3491 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3492 
3493 	fw_data = (const __le32 *)
3494 		(adev->gfx.mec_fw->data +
3495 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3496 	tmp = 0;
3497 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3498 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3499 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3500 
3501 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3502 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3503 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3504 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3505 
3506 	/* MEC1 */
3507 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3508 			 mec_hdr->jt_offset);
3509 	for (i = 0; i < mec_hdr->jt_size; i++)
3510 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3511 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3512 
3513 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3514 			adev->gfx.mec_fw_version);
3515 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3516 
3517 	return 0;
3518 }
3519 
3520 /* KIQ functions */
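/*
 * The queue id in the low byte of RLC_CP_SCHEDULERS is encoded as
 * (me << 5) | (pipe << 3) | queue; it is OR'ed with 0x80 below when the
 * KIQ is registered with the RLC.
 */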
3521 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3522 {
3523 	uint32_t tmp;
3524 	struct amdgpu_device *adev = ring->adev;
3525 
3526 	/* tell RLC which is KIQ queue */
3527 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3528 	tmp &= 0xffffff00;
3529 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3530 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
3531 }
3532 
3533 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3534 {
3535 	struct amdgpu_device *adev = ring->adev;
3536 
3537 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3538 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3539 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3540 			mqd->cp_hqd_queue_priority =
3541 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3542 		}
3543 	}
3544 }
3545 
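/*
 * Populate the memory queue descriptor (MQD) for a ring from scratch:
 * static thread management masks, EOP buffer, doorbell control, MQD and
 * HQD base addresses, rptr/wptr write-back addresses and queue priority.
 * The MQD is consumed later either by direct register writes (KIQ, see
 * gfx_v9_0_kiq_init_register()) or by the KIQ map_queues packet (KCQs).
 */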
3546 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3547 {
3548 	struct amdgpu_device *adev = ring->adev;
3549 	struct v9_mqd *mqd = ring->mqd_ptr;
3550 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3551 	uint32_t tmp;
3552 
3553 	mqd->header = 0xC0310800;
3554 	mqd->compute_pipelinestat_enable = 0x00000001;
3555 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3556 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3557 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3558 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3559 	mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3560 	mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3561 	mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3562 	mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3563 	mqd->compute_misc_reserved = 0x00000003;
3564 
3565 	mqd->dynamic_cu_mask_addr_lo =
3566 		lower_32_bits(ring->mqd_gpu_addr
3567 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3568 	mqd->dynamic_cu_mask_addr_hi =
3569 		upper_32_bits(ring->mqd_gpu_addr
3570 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3571 
3572 	eop_base_addr = ring->eop_gpu_addr >> 8;
3573 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3574 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3575 
3576 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3577 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3578 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3579 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3580 
3581 	mqd->cp_hqd_eop_control = tmp;
3582 
3583 	/* enable doorbell? */
3584 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3585 
3586 	if (ring->use_doorbell) {
3587 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3588 				    DOORBELL_OFFSET, ring->doorbell_index);
3589 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3590 				    DOORBELL_EN, 1);
3591 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3592 				    DOORBELL_SOURCE, 0);
3593 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3594 				    DOORBELL_HIT, 0);
3595 	} else {
3596 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3597 					 DOORBELL_EN, 0);
3598 	}
3599 
3600 	mqd->cp_hqd_pq_doorbell_control = tmp;
3601 
3602 	/* disable the queue if it's active */
3603 	ring->wptr = 0;
3604 	mqd->cp_hqd_dequeue_request = 0;
3605 	mqd->cp_hqd_pq_rptr = 0;
3606 	mqd->cp_hqd_pq_wptr_lo = 0;
3607 	mqd->cp_hqd_pq_wptr_hi = 0;
3608 
3609 	/* set the pointer to the MQD */
3610 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3611 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3612 
3613 	/* set MQD vmid to 0 */
3614 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3615 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3616 	mqd->cp_mqd_control = tmp;
3617 
3618 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3619 	hqd_gpu_addr = ring->gpu_addr >> 8;
3620 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3621 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3622 
3623 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3624 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3625 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3626 			    (order_base_2(ring->ring_size / 4) - 1));
3627 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3628 			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3629 #ifdef __BIG_ENDIAN
3630 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3631 #endif
3632 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3633 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3634 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3635 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3636 	mqd->cp_hqd_pq_control = tmp;
3637 
3638 	/* set the wb address whether it's enabled or not */
3639 	wb_gpu_addr = ring->rptr_gpu_addr;
3640 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3641 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3642 		upper_32_bits(wb_gpu_addr) & 0xffff;
3643 
3644 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3645 	wb_gpu_addr = ring->wptr_gpu_addr;
3646 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3647 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3648 
3649 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3650 	ring->wptr = 0;
3651 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3652 
3653 	/* set the vmid for the queue */
3654 	mqd->cp_hqd_vmid = 0;
3655 
3656 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3657 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3658 	mqd->cp_hqd_persistent_state = tmp;
3659 
3660 	/* set MIN_IB_AVAIL_SIZE */
3661 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3662 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3663 	mqd->cp_hqd_ib_control = tmp;
3664 
3665 	/* set static priority for a queue/ring */
3666 	gfx_v9_0_mqd_set_priority(ring, mqd);
3667 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3668 
3669 	/* the map_queues packet activates the queue by itself,
3670 	 * so only the KIQ needs to set this field.
3671 	 */
3672 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3673 		mqd->cp_hqd_active = 1;
3674 
3675 	return 0;
3676 }
3677 
3678 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3679 {
3680 	struct amdgpu_device *adev = ring->adev;
3681 	struct v9_mqd *mqd = ring->mqd_ptr;
3682 	int j;
3683 
3684 	/* disable wptr polling */
3685 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3686 
3687 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3688 	       mqd->cp_hqd_eop_base_addr_lo);
3689 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3690 	       mqd->cp_hqd_eop_base_addr_hi);
3691 
3692 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3693 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3694 	       mqd->cp_hqd_eop_control);
3695 
3696 	/* enable doorbell? */
3697 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3698 	       mqd->cp_hqd_pq_doorbell_control);
3699 
3700 	/* disable the queue if it's active */
3701 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3702 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3703 		for (j = 0; j < adev->usec_timeout; j++) {
3704 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3705 				break;
3706 			udelay(1);
3707 		}
3708 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3709 		       mqd->cp_hqd_dequeue_request);
3710 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3711 		       mqd->cp_hqd_pq_rptr);
3712 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3713 		       mqd->cp_hqd_pq_wptr_lo);
3714 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3715 		       mqd->cp_hqd_pq_wptr_hi);
3716 	}
3717 
3718 	/* set the pointer to the MQD */
3719 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3720 	       mqd->cp_mqd_base_addr_lo);
3721 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3722 	       mqd->cp_mqd_base_addr_hi);
3723 
3724 	/* set MQD vmid to 0 */
3725 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3726 	       mqd->cp_mqd_control);
3727 
3728 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3729 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3730 	       mqd->cp_hqd_pq_base_lo);
3731 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3732 	       mqd->cp_hqd_pq_base_hi);
3733 
3734 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3735 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3736 	       mqd->cp_hqd_pq_control);
3737 
3738 	/* set the wb address whether it's enabled or not */
3739 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3740 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3741 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3742 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3743 
3744 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3745 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3746 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3747 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3748 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3749 
3750 	/* enable the doorbell if requested */
3751 	if (ring->use_doorbell) {
3752 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3753 					(adev->doorbell_index.kiq * 2) << 2);
3754 		/* If the GC has entered CGPG, ringing a doorbell beyond the first
3755 		 * page does not wake it up. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
3756 		 * work around this issue; the change has to stay aligned with the
3757 		 * firmware update.
3758 		 */
3759 		if (check_if_enlarge_doorbell_range(adev))
3760 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3761 					(adev->doorbell.size - 4));
3762 		else
3763 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3764 					(adev->doorbell_index.userqueue_end * 2) << 2);
3765 	}
3766 
3767 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3768 	       mqd->cp_hqd_pq_doorbell_control);
3769 
3770 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3771 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3772 	       mqd->cp_hqd_pq_wptr_lo);
3773 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3774 	       mqd->cp_hqd_pq_wptr_hi);
3775 
3776 	/* set the vmid for the queue */
3777 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3778 
3779 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3780 	       mqd->cp_hqd_persistent_state);
3781 
3782 	/* activate the queue */
3783 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3784 	       mqd->cp_hqd_active);
3785 
3786 	if (ring->use_doorbell)
3787 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3788 
3789 	return 0;
3790 }
3791 
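/*
 * Tear down the KIQ HQD: request a dequeue and poll CP_HQD_ACTIVE until
 * the queue drains, clearing CP_HQD_ACTIVE by hand if the request times
 * out, then zero the remaining HQD state registers.
 */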
3792 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3793 {
3794 	struct amdgpu_device *adev = ring->adev;
3795 	int j;
3796 
3797 	/* disable the queue if it's active */
3798 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3799 
3800 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3801 
3802 		for (j = 0; j < adev->usec_timeout; j++) {
3803 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3804 				break;
3805 			udelay(1);
3806 		}
3807 
3808 		if (j == adev->usec_timeout) {
3809 			DRM_DEBUG("KIQ dequeue request failed.\n");
3810 
3811 			/* Manual disable if dequeue request times out */
3812 			WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3813 		}
3814 
3815 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3816 		      0);
3817 	}
3818 
3819 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3820 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3821 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3822 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3823 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3824 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3825 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3826 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3827 
3828 	return 0;
3829 }
3830 
3831 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3832 {
3833 	struct amdgpu_device *adev = ring->adev;
3834 	struct v9_mqd *mqd = ring->mqd_ptr;
3835 	struct v9_mqd *tmp_mqd;
3836 
3837 	gfx_v9_0_kiq_setting(ring);
3838 
3839 	/* The GPU may be in a bad state during probe: the driver triggers a
3840 	 * reset after loading the SMU, and in that case the MQD has not been
3841 	 * initialized yet, so the driver needs to re-init it.
3842 	 * Check mqd->cp_hqd_pq_control, since that value should never be 0.
3843 	 */
3844 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
3845 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
3846 		/* for GPU_RESET case , reset MQD to a clean status */
3847 		if (adev->gfx.kiq[0].mqd_backup)
3848 			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
3849 
3850 		/* reset ring buffer */
3851 		ring->wptr = 0;
3852 		amdgpu_ring_clear_ring(ring);
3853 
3854 		mutex_lock(&adev->srbm_mutex);
3855 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3856 		gfx_v9_0_kiq_init_register(ring);
3857 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3858 		mutex_unlock(&adev->srbm_mutex);
3859 	} else {
3860 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3861 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3862 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3863 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3864 			amdgpu_ring_clear_ring(ring);
3865 		mutex_lock(&adev->srbm_mutex);
3866 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3867 		gfx_v9_0_mqd_init(ring);
3868 		gfx_v9_0_kiq_init_register(ring);
3869 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3870 		mutex_unlock(&adev->srbm_mutex);
3871 
3872 		if (adev->gfx.kiq[0].mqd_backup)
3873 			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
3874 	}
3875 
3876 	return 0;
3877 }
3878 
3879 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
3880 {
3881 	struct amdgpu_device *adev = ring->adev;
3882 	struct v9_mqd *mqd = ring->mqd_ptr;
3883 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3884 	struct v9_mqd *tmp_mqd;
3885 
3886 	/* As with the KIQ init above, the driver needs to re-init the MQD if
3887 	 * mqd->cp_hqd_pq_control was never initialized.
3888 	 */
3889 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3890 
3891 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
3892 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
3893 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3894 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3895 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3896 		mutex_lock(&adev->srbm_mutex);
3897 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3898 		gfx_v9_0_mqd_init(ring);
3899 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3900 		mutex_unlock(&adev->srbm_mutex);
3901 
3902 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3903 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3904 	} else {
3905 		/* restore MQD to a clean status */
3906 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3907 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3908 		/* reset ring buffer */
3909 		ring->wptr = 0;
3910 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3911 		amdgpu_ring_clear_ring(ring);
3912 	}
3913 
3914 	return 0;
3915 }
3916 
3917 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3918 {
3919 	gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
3920 	return 0;
3921 }
3922 
3923 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3924 {
3925 	int i, r;
3926 
3927 	gfx_v9_0_cp_compute_enable(adev, true);
3928 
3929 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3930 		r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
3931 		if (r)
3932 			return r;
3933 	}
3934 
3935 	return amdgpu_gfx_enable_kcq(adev, 0);
3936 }
3937 
3938 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3939 {
3940 	int r, i;
3941 	struct amdgpu_ring *ring;
3942 
3943 	if (!(adev->flags & AMD_IS_APU))
3944 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3945 
3946 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3947 		if (adev->gfx.num_gfx_rings) {
3948 			/* legacy firmware loading */
3949 			r = gfx_v9_0_cp_gfx_load_microcode(adev);
3950 			if (r)
3951 				return r;
3952 		}
3953 
3954 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3955 		if (r)
3956 			return r;
3957 	}
3958 
3959 	if (adev->gfx.num_gfx_rings)
3960 		gfx_v9_0_cp_gfx_enable(adev, false);
3961 	gfx_v9_0_cp_compute_enable(adev, false);
3962 
3963 	r = gfx_v9_0_kiq_resume(adev);
3964 	if (r)
3965 		return r;
3966 
3967 	if (adev->gfx.num_gfx_rings) {
3968 		r = gfx_v9_0_cp_gfx_resume(adev);
3969 		if (r)
3970 			return r;
3971 	}
3972 
3973 	r = gfx_v9_0_kcq_resume(adev);
3974 	if (r)
3975 		return r;
3976 
3977 	if (adev->gfx.num_gfx_rings) {
3978 		ring = &adev->gfx.gfx_ring[0];
3979 		r = amdgpu_ring_test_helper(ring);
3980 		if (r)
3981 			return r;
3982 	}
3983 
3984 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3985 		ring = &adev->gfx.compute_ring[i];
3986 		amdgpu_ring_test_helper(ring);
3987 	}
3988 
3989 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3990 
3991 	return 0;
3992 }
3993 
3994 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3995 {
3996 	u32 tmp;
3997 
3998 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&
3999 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
4000 		return;
4001 
4002 	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
4003 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
4004 				adev->df.hash_status.hash_64k);
4005 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
4006 				adev->df.hash_status.hash_2m);
4007 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
4008 				adev->df.hash_status.hash_1g);
4009 	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
4010 }
4011 
4012 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
4013 {
4014 	if (adev->gfx.num_gfx_rings)
4015 		gfx_v9_0_cp_gfx_enable(adev, enable);
4016 	gfx_v9_0_cp_compute_enable(adev, enable);
4017 }
4018 
4019 static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
4020 {
4021 	int r;
4022 	struct amdgpu_device *adev = ip_block->adev;
4023 
4024 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4025 				       adev->gfx.cleaner_shader_ptr);
4026 
4027 	if (!amdgpu_sriov_vf(adev))
4028 		gfx_v9_0_init_golden_registers(adev);
4029 
4030 	gfx_v9_0_constants_init(adev);
4031 
4032 	gfx_v9_0_init_tcp_config(adev);
4033 
4034 	r = adev->gfx.rlc.funcs->resume(adev);
4035 	if (r)
4036 		return r;
4037 
4038 	r = gfx_v9_0_cp_resume(adev);
4039 	if (r)
4040 		return r;
4041 
4042 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
4043 	    !amdgpu_sriov_vf(adev))
4044 		gfx_v9_4_2_set_power_brake_sequence(adev);
4045 
4046 	return r;
4047 }
4048 
4049 static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
4050 {
4051 	struct amdgpu_device *adev = ip_block->adev;
4052 
4053 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4054 		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4055 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4056 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4057 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4058 
4059 	/* with a triggered RAS interrupt, DF freeze and KCQ disable would fail */
4060 	if (!amdgpu_ras_intr_triggered())
4061 		/* disable the KCQ so the CPC stops touching memory that is no longer valid */
4062 		amdgpu_gfx_disable_kcq(adev, 0);
4063 
4064 	if (amdgpu_sriov_vf(adev)) {
4065 		gfx_v9_0_cp_gfx_enable(adev, false);
4066 		/* Polling must be disabled for SRIOV once the hw has finished;
4067 		 * otherwise the CPC engine may keep fetching a WB address that is
4068 		 * no longer valid after the sw side has finished, triggering a
4069 		 * DMAR read error on the hypervisor side.
4070 		 */
4071 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4072 		return 0;
4073 	}
4074 
4075 	/* Use the deinitialize sequence from CAIL when unbinding the device
4076 	 * from the driver, otherwise the KIQ hangs when it is bound back.
4077 	 */
4078 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4079 		mutex_lock(&adev->srbm_mutex);
4080 		soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
4081 				adev->gfx.kiq[0].ring.pipe,
4082 				adev->gfx.kiq[0].ring.queue, 0, 0);
4083 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
4084 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4085 		mutex_unlock(&adev->srbm_mutex);
4086 	}
4087 
4088 	gfx_v9_0_cp_enable(adev, false);
4089 
4090 	/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
4091 	if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
4092 	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {
4093 		dev_dbg(adev->dev, "Skipping RLC halt\n");
4094 		return 0;
4095 	}
4096 
4097 	adev->gfx.rlc.funcs->stop(adev);
4098 	return 0;
4099 }
4100 
4101 static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
4102 {
4103 	return gfx_v9_0_hw_fini(ip_block);
4104 }
4105 
4106 static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
4107 {
4108 	return gfx_v9_0_hw_init(ip_block);
4109 }
4110 
4111 static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
4112 {
4113 	struct amdgpu_device *adev = ip_block->adev;
4114 
4115 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4116 				GRBM_STATUS, GUI_ACTIVE))
4117 		return false;
4118 	else
4119 		return true;
4120 }
4121 
4122 static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4123 {
4124 	unsigned i;
4125 	struct amdgpu_device *adev = ip_block->adev;
4126 
4127 	for (i = 0; i < adev->usec_timeout; i++) {
4128 		if (gfx_v9_0_is_idle(ip_block))
4129 			return 0;
4130 		udelay(1);
4131 	}
4132 	return -ETIMEDOUT;
4133 }
4134 
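/*
 * Inspect GRBM_STATUS/GRBM_STATUS2 for busy blocks and, if anything is
 * stuck, stop the RLC, disable CP parsing/prefetching and pulse the
 * corresponding GRBM_SOFT_RESET bits.
 */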
4135 static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
4136 {
4137 	u32 grbm_soft_reset = 0;
4138 	u32 tmp;
4139 	struct amdgpu_device *adev = ip_block->adev;
4140 
4141 	/* GRBM_STATUS */
4142 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4143 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4144 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4145 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4146 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4147 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4148 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4149 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4150 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4151 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4152 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4153 	}
4154 
4155 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4156 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4157 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4158 	}
4159 
4160 	/* GRBM_STATUS2 */
4161 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4162 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4163 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4164 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4165 
4166 
4167 	if (grbm_soft_reset) {
4168 		/* stop the rlc */
4169 		adev->gfx.rlc.funcs->stop(adev);
4170 
4171 		if (adev->gfx.num_gfx_rings)
4172 			/* Disable GFX parsing/prefetching */
4173 			gfx_v9_0_cp_gfx_enable(adev, false);
4174 
4175 		/* Disable MEC parsing/prefetching */
4176 		gfx_v9_0_cp_compute_enable(adev, false);
4177 
4178 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4179 		tmp |= grbm_soft_reset;
4180 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4181 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4182 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4183 
4184 		udelay(50);
4185 
4186 		tmp &= ~grbm_soft_reset;
4187 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4188 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4189 
4190 		/* Wait a little for things to settle down */
4191 		udelay(50);
4192 	}
4193 	return 0;
4194 }
4195 
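/*
 * Read the 64-bit GPU clock counter through the KIQ: emit a COPY_DATA
 * packet that copies the clock value into a write-back slot, then poll
 * the fence for completion, bailing out early during GPU reset or when
 * called from IRQ context.
 */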
4196 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4197 {
4198 	signed long r, cnt = 0;
4199 	unsigned long flags;
4200 	uint32_t seq, reg_val_offs = 0;
4201 	uint64_t value = 0;
4202 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4203 	struct amdgpu_ring *ring = &kiq->ring;
4204 
4205 	BUG_ON(!ring->funcs->emit_rreg);
4206 
4207 	spin_lock_irqsave(&kiq->ring_lock, flags);
4208 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4209 		pr_err("critical bug! too many kiq readers\n");
4210 		goto failed_unlock;
4211 	}
4212 	amdgpu_ring_alloc(ring, 32);
4213 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4214 	amdgpu_ring_write(ring, 9 |	/* src: register*/
4215 				(5 << 8) |	/* dst: memory */
4216 				(1 << 16) |	/* count sel */
4217 				(1 << 20));	/* write confirm */
4218 	amdgpu_ring_write(ring, 0);
4219 	amdgpu_ring_write(ring, 0);
4220 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4221 				reg_val_offs * 4));
4222 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4223 				reg_val_offs * 4));
4224 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4225 	if (r)
4226 		goto failed_undo;
4227 
4228 	amdgpu_ring_commit(ring);
4229 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4230 
4231 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4232 
4233 	/* Don't keep waiting in the GPU-reset case, because that may block
4234 	 * the gpu_recover() routine forever: e.g. if this KIQ read is
4235 	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
4236 	 * return while we keep waiting here, which leaves gpu_recover()
4237 	 * hanging there.
4238 	 *
4239 	 * Also don't keep waiting when called from IRQ context.
4240 	 */
4241 	if (r < 1 && (amdgpu_in_reset(adev)))
4242 		goto failed_kiq_read;
4243 
4244 	might_sleep();
4245 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4246 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4247 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4248 	}
4249 
4250 	if (cnt > MAX_KIQ_REG_TRY)
4251 		goto failed_kiq_read;
4252 
4253 	mb();
4254 	value = (uint64_t)adev->wb.wb[reg_val_offs] |
4255 		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4256 	amdgpu_device_wb_free(adev, reg_val_offs);
4257 	return value;
4258 
4259 failed_undo:
4260 	amdgpu_ring_undo(ring);
4261 failed_unlock:
4262 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4263 failed_kiq_read:
4264 	if (reg_val_offs)
4265 		amdgpu_device_wb_free(adev, reg_val_offs);
4266 	pr_err("failed to read gpu clock\n");
4267 	return ~0;
4268 }
4269 
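/*
 * On GC 9.3.0 (Renoir) read the golden TSC from SMUIO directly,
 * re-sampling the low dword if the high dword rolled over in between.
 * Other parts latch the RLC GPU clock counter instead, going through
 * the KIQ when running as an SRIOV guest on GC 9.0.1.
 */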
4270 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4271 {
4272 	uint64_t clock, clock_lo, clock_hi, hi_check;
4273 
4274 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4275 	case IP_VERSION(9, 3, 0):
4276 		preempt_disable();
4277 		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4278 		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4279 		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4280 		/* The SMUIO TSC clock runs at 100MHz, so the 32-bit low word
4281 		 * carries over roughly every 42 seconds.
4282 		 */
4283 		if (hi_check != clock_hi) {
4284 			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4285 			clock_hi = hi_check;
4286 		}
4287 		preempt_enable();
4288 		clock = clock_lo | (clock_hi << 32ULL);
4289 		break;
4290 	default:
4291 		amdgpu_gfx_off_ctrl(adev, false);
4292 		mutex_lock(&adev->gfx.gpu_clock_mutex);
4293 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
4294 			    IP_VERSION(9, 0, 1) &&
4295 		    amdgpu_sriov_runtime(adev)) {
4296 			clock = gfx_v9_0_kiq_read_clock(adev);
4297 		} else {
4298 			WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4299 			clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4300 				((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4301 		}
4302 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
4303 		amdgpu_gfx_off_ctrl(adev, true);
4304 		break;
4305 	}
4306 	return clock;
4307 }
4308 
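/*
 * Editor's sketch: the UPPER/LOWER/UPPER read sequence above is the
 * standard lock-free way to sample a 64-bit counter exposed as two
 * 32-bit registers. If the high word changed between the two UPPER
 * reads, a carry happened mid-sample and the LOWER word is re-read,
 * logically equivalent to:
 *
 *   do { hi = read_upper(); lo = read_lower(); } while (hi != read_upper());
 *
 * (read_upper/read_lower are placeholder names.) At the 100 MHz TSC
 * rate a carry only occurs about every 42 seconds, so a single retry
 * is enough in practice.
 */
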
4309 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4310 					  uint32_t vmid,
4311 					  uint32_t gds_base, uint32_t gds_size,
4312 					  uint32_t gws_base, uint32_t gws_size,
4313 					  uint32_t oa_base, uint32_t oa_size)
4314 {
4315 	struct amdgpu_device *adev = ring->adev;
4316 
4317 	/* GDS Base */
4318 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4319 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4320 				   gds_base);
4321 
4322 	/* GDS Size */
4323 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4324 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4325 				   gds_size);
4326 
4327 	/* GWS */
4328 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4329 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4330 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4331 
4332 	/* OA */
4333 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4334 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4335 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
4336 }
4337 
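/*
 * Worked example (editor's illustration): the OA value programmed
 * above, (1 << (oa_size + oa_base)) - (1 << oa_base), builds a
 * contiguous mask of oa_size bits starting at bit oa_base. With
 * oa_base == 4 and oa_size == 2:
 *
 *   (1 << 6) - (1 << 4) == 0x40 - 0x10 == 0x30   (bits 4..5 set)
 *
 * GWS packs base and size into a single register instead, via
 * gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base.
 */
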
4338 static const u32 vgpr_init_compute_shader[] =
4339 {
4340 	0xb07c0000, 0xbe8000ff,
4341 	0x000000f8, 0xbf110800,
4342 	0x7e000280, 0x7e020280,
4343 	0x7e040280, 0x7e060280,
4344 	0x7e080280, 0x7e0a0280,
4345 	0x7e0c0280, 0x7e0e0280,
4346 	0x80808800, 0xbe803200,
4347 	0xbf84fff5, 0xbf9c0000,
4348 	0xd28c0001, 0x0001007f,
4349 	0xd28d0001, 0x0002027e,
4350 	0x10020288, 0xb8810904,
4351 	0xb7814000, 0xd1196a01,
4352 	0x00000301, 0xbe800087,
4353 	0xbefc00c1, 0xd89c4000,
4354 	0x00020201, 0xd89cc080,
4355 	0x00040401, 0x320202ff,
4356 	0x00000800, 0x80808100,
4357 	0xbf84fff8, 0x7e020280,
4358 	0xbf810000, 0x00000000,
4359 };
4360 
4361 static const u32 sgpr_init_compute_shader[] =
4362 {
4363 	0xb07c0000, 0xbe8000ff,
4364 	0x0000005f, 0xbee50080,
4365 	0xbe812c65, 0xbe822c65,
4366 	0xbe832c65, 0xbe842c65,
4367 	0xbe852c65, 0xb77c0005,
4368 	0x80808500, 0xbf84fff8,
4369 	0xbe800080, 0xbf810000,
4370 };
4371 
4372 static const u32 vgpr_init_compute_shader_arcturus[] = {
4373 	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4374 	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4375 	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4376 	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4377 	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4378 	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4379 	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4380 	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4381 	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4382 	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4383 	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4384 	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4385 	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4386 	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4387 	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4388 	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4389 	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4390 	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4391 	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4392 	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4393 	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4394 	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4395 	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4396 	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4397 	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4398 	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4399 	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4400 	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4401 	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4402 	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4403 	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4404 	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4405 	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4406 	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4407 	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4408 	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4409 	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4410 	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4411 	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4412 	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4413 	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4414 	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4415 	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4416 	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4417 	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4418 	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4419 	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4420 	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4421 	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4422 	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4423 	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4424 	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4425 	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4426 	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4427 	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4428 	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4429 	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4430 	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4431 	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4432 	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4433 	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4434 	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4435 	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4436 	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4437 	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4438 	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4439 	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4440 	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4441 	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4442 	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4443 	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4444 	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4445 	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4446 	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4447 	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4448 	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4449 	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4450 	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4451 	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4452 	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4453 	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4454 	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4455 	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4456 	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4457 	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4458 	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4459 	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4460 	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4461 	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4462 	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4463 	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4464 	0xbf84fff8, 0xbf810000,
4465 };
4466 
4467 /* When the register arrays below are changed, please update gpr_reg_size
4468    and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds() to
4469    cover all gfx9 ASICs */
4470 static const struct soc15_reg_entry vgpr_init_regs[] = {
4471    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4472    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4473    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4474    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4475    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4476    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4477    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4478    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4479    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4480    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4481    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4482    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4483    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4484    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4485 };
4486 
4487 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4488    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4489    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4490    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4491    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4492    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4493    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4494    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4495    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4496    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4497    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4498    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4499    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4500    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4501    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4502 };
4503 
4504 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4505    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4506    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4507    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4508    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4509    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4510    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4511    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4512    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4513    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4514    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4515    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4516    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4517    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4518    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4519 };
4520 
4521 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4522    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4523    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4524    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4525    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4526    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4527    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4528    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4529    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4530    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4531    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4532    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4533    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4534    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4535    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4536 };
4537 
4538 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4539    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4540    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4541    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4542    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4543    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4544    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4545    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4546    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4547    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4548    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4549    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4550    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4551    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4552    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4553    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4554    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4555    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4556    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4557    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4558    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4559    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4560    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4561    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4562    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4563    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4564    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4565    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4566    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4567    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4568    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4569    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4570    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4571    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4572 };
4573 
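/*
 * Editor's note on the table above: each entry is { register, reset
 * value, se_num, instance }. The trailing pair gives how many
 * shader-engine and per-SE instance copies of the counter exist; the
 * EDC scan is assumed to walk them by selecting each copy through
 * GRBM_GFX_INDEX before reading, roughly:
 *
 *   for (se = 0; se < entry->se_num; se++)
 *           for (inst = 0; inst < entry->instance; inst++) {
 *                   gfx_v9_0_select_se_sh(adev, se, 0, inst);
 *                   (read and accumulate the counter)
 *           }
 *
 * (a sketch of the iteration, not the exact error-count loop)
 */
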
4574 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4575 {
4576 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4577 	int i, r;
4578 
4579 	/* only supported when RAS is enabled */
4580 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4581 		return 0;
4582 
4583 	r = amdgpu_ring_alloc(ring, 7);
4584 	if (r) {
4585 		DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4586 			ring->name, r);
4587 		return r;
4588 	}
4589 
4590 	WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4591 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4592 
4593 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4594 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4595 				PACKET3_DMA_DATA_DST_SEL(1) |
4596 				PACKET3_DMA_DATA_SRC_SEL(2) |
4597 				PACKET3_DMA_DATA_ENGINE(0)));
4598 	amdgpu_ring_write(ring, 0);
4599 	amdgpu_ring_write(ring, 0);
4600 	amdgpu_ring_write(ring, 0);
4601 	amdgpu_ring_write(ring, 0);
4602 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4603 				adev->gds.gds_size);
4604 
4605 	amdgpu_ring_commit(ring);
4606 
4607 	for (i = 0; i < adev->usec_timeout; i++) {
4608 		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4609 			break;
4610 		udelay(1);
4611 	}
4612 
4613 	if (i >= adev->usec_timeout)
4614 		r = -ETIMEDOUT;
4615 
4616 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4617 
4618 	return r;
4619 }
4620 
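/*
 * Editor's sketch of the workaround above: the DMA_DATA packet, with
 * DST_SEL(1) and SRC_SEL(2) (assumed to mean GDS destination and
 * inline-data source per the PM4 encoding), fills the whole VMID0 GDS
 * aperture with zeroes, and RAW_WAIT orders it against prior writes.
 * Completion is then detected by waiting for the CP to drain the ring,
 * i.e. for the read pointer to catch up with what we committed:
 *
 *   while (gfx_v9_0_ring_get_rptr_compute(ring) != ring->wptr)
 *           udelay(1);      (bounded by adev->usec_timeout)
 */
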
4621 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4622 {
4623 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4624 	struct amdgpu_ib ib;
4625 	struct dma_fence *f = NULL;
4626 	int r, i;
4627 	unsigned total_size, vgpr_offset, sgpr_offset;
4628 	u64 gpu_addr;
4629 
4630 	int compute_dim_x = adev->gfx.config.max_shader_engines *
4631 						adev->gfx.config.max_cu_per_sh *
4632 						adev->gfx.config.max_sh_per_se;
4633 	int sgpr_work_group_size = 5;
4634 	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4635 	int vgpr_init_shader_size;
4636 	const u32 *vgpr_init_shader_ptr;
4637 	const struct soc15_reg_entry *vgpr_init_regs_ptr;
4638 
4639 	/* only supported when RAS is enabled */
4640 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4641 		return 0;
4642 
4643 	/* bail if the compute ring is not ready */
4644 	if (!ring->sched.ready)
4645 		return 0;
4646 
4647 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
4648 		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4649 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4650 		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4651 	} else {
4652 		vgpr_init_shader_ptr = vgpr_init_compute_shader;
4653 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4654 		vgpr_init_regs_ptr = vgpr_init_regs;
4655 	}
4656 
4657 	total_size =
4658 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4659 	total_size +=
4660 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4661 	total_size +=
4662 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4663 	total_size = ALIGN(total_size, 256);
4664 	vgpr_offset = total_size;
4665 	total_size += ALIGN(vgpr_init_shader_size, 256);
4666 	sgpr_offset = total_size;
4667 	total_size += sizeof(sgpr_init_compute_shader);
4668 
4669 	/* allocate an indirect buffer to put the commands in */
4670 	memset(&ib, 0, sizeof(ib));
4671 	r = amdgpu_ib_get(adev, NULL, total_size,
4672 					AMDGPU_IB_POOL_DIRECT, &ib);
4673 	if (r) {
4674 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4675 		return r;
4676 	}
4677 
4678 	/* load the compute shaders */
4679 	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4680 		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4681 
4682 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4683 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4684 
4685 	/* init the ib length to 0 */
4686 	ib.length_dw = 0;
4687 
4688 	/* VGPR */
4689 	/* write the register state for the compute dispatch */
4690 	for (i = 0; i < gpr_reg_size; i++) {
4691 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4692 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4693 								- PACKET3_SET_SH_REG_START;
4694 		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4695 	}
4696 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4697 	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4698 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4699 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4700 							- PACKET3_SET_SH_REG_START;
4701 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4702 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4703 
4704 	/* write dispatch packet */
4705 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4706 	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4707 	ib.ptr[ib.length_dw++] = 1; /* y */
4708 	ib.ptr[ib.length_dw++] = 1; /* z */
4709 	ib.ptr[ib.length_dw++] =
4710 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4711 
4712 	/* write CS partial flush packet */
4713 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4714 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4715 
4716 	/* SGPR1 */
4717 	/* write the register state for the compute dispatch */
4718 	for (i = 0; i < gpr_reg_size; i++) {
4719 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4720 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4721 								- PACKET3_SET_SH_REG_START;
4722 		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4723 	}
4724 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4725 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4726 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4727 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4728 							- PACKET3_SET_SH_REG_START;
4729 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4730 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4731 
4732 	/* write dispatch packet */
4733 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4734 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4735 	ib.ptr[ib.length_dw++] = 1; /* y */
4736 	ib.ptr[ib.length_dw++] = 1; /* z */
4737 	ib.ptr[ib.length_dw++] =
4738 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4739 
4740 	/* write CS partial flush packet */
4741 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4742 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4743 
4744 	/* SGPR2 */
4745 	/* write the register state for the compute dispatch */
4746 	for (i = 0; i < gpr_reg_size; i++) {
4747 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4748 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4749 								- PACKET3_SET_SH_REG_START;
4750 		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4751 	}
4752 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4753 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4754 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4755 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4756 							- PACKET3_SET_SH_REG_START;
4757 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4758 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4759 
4760 	/* write dispatch packet */
4761 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4762 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4763 	ib.ptr[ib.length_dw++] = 1; /* y */
4764 	ib.ptr[ib.length_dw++] = 1; /* z */
4765 	ib.ptr[ib.length_dw++] =
4766 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4767 
4768 	/* write CS partial flush packet */
4769 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4770 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4771 
4772 	/* schedule the ib on the ring */
4773 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4774 	if (r) {
4775 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4776 		goto fail;
4777 	}
4778 
4779 	/* wait for the GPU to finish processing the IB */
4780 	r = dma_fence_wait(f, false);
4781 	if (r) {
4782 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4783 		goto fail;
4784 	}
4785 
4786 fail:
4787 	amdgpu_ib_free(&ib, NULL);
4788 	dma_fence_put(f);
4789 
4790 	return r;
4791 }
4792 
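/*
 * Worked example of the IB sizing above (editor's illustration for a
 * 4-SE part, so gpr_reg_size == 4 + 6 == 10):
 *
 *   per dispatch: (10 * 3 + 4 + 5 + 2) * 4 == 164 bytes of packets
 *   3 dispatches: 492 bytes -> ALIGN(492, 256) == 512
 *   vgpr_offset == 512 (VGPR shader, 256-byte aligned)
 *   sgpr_offset == 512 + ALIGN(vgpr shader size, 256)
 *
 * The ">> 8" applied to ib.gpu_addr before writing mmCOMPUTE_PGM_LO/HI
 * reflects that the shader start address is programmed in 256-byte
 * units.
 */
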
4793 static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
4794 {
4795 	struct amdgpu_device *adev = ip_block->adev;
4796 
4797 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
4798 
4799 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
4800 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4801 		adev->gfx.num_gfx_rings = 0;
4802 	else
4803 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4804 	adev->gfx.xcc_mask = 1;
4805 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4806 					  AMDGPU_MAX_COMPUTE_RINGS);
4807 	gfx_v9_0_set_kiq_pm4_funcs(adev);
4808 	gfx_v9_0_set_ring_funcs(adev);
4809 	gfx_v9_0_set_irq_funcs(adev);
4810 	gfx_v9_0_set_gds_init(adev);
4811 	gfx_v9_0_set_rlc_funcs(adev);
4812 
4813 	/* init rlcg reg access ctrl */
4814 	gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
4815 
4816 	return gfx_v9_0_init_microcode(adev);
4817 }
4818 
4819 static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
4820 {
4821 	struct amdgpu_device *adev = ip_block->adev;
4822 	int r;
4823 
4824 	/*
4825 	 * Temporary workaround: on several cards the CP firmware fails to
4826 	 * update the read pointer when CPDMA writes the GDS clearing
4827 	 * operation during the suspend/resume sequence, so limit this
4828 	 * operation to the cold boot sequence.
4829 	 */
4830 	if ((!adev->in_suspend) &&
4831 	    (adev->gds.gds_size)) {
4832 		r = gfx_v9_0_do_edc_gds_workarounds(adev);
4833 		if (r)
4834 			return r;
4835 	}
4836 
4837 	/* requires IBs so do in late init after IB pool is initialized */
4838 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4839 		r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4840 	else
4841 		r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4842 
4843 	if (r)
4844 		return r;
4845 
4846 	if (adev->gfx.ras &&
4847 	    adev->gfx.ras->enable_watchdog_timer)
4848 		adev->gfx.ras->enable_watchdog_timer(adev);
4849 
4850 	return 0;
4851 }
4852 
4853 static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
4854 {
4855 	struct amdgpu_device *adev = ip_block->adev;
4856 	int r;
4857 
4858 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4859 	if (r)
4860 		return r;
4861 
4862 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4863 	if (r)
4864 		return r;
4865 
4866 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
4867 	if (r)
4868 		return r;
4869 
4870 	r = gfx_v9_0_ecc_late_init(ip_block);
4871 	if (r)
4872 		return r;
4873 
4874 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4875 		gfx_v9_4_2_debug_trap_config_init(adev,
4876 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4877 	else
4878 		gfx_v9_0_debug_trap_config_init(adev,
4879 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4880 
4881 	return 0;
4882 }
4883 
4884 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4885 {
4886 	uint32_t rlc_setting;
4887 
4888 	/* if RLC is not enabled, do nothing */
4889 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4890 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4891 		return false;
4892 
4893 	return true;
4894 }
4895 
4896 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4897 {
4898 	uint32_t data;
4899 	unsigned i;
4900 
4901 	data = RLC_SAFE_MODE__CMD_MASK;
4902 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4903 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4904 
4905 	/* wait for RLC_SAFE_MODE */
4906 	for (i = 0; i < adev->usec_timeout; i++) {
4907 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4908 			break;
4909 		udelay(1);
4910 	}
4911 }
4912 
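/*
 * Editor's note: entering safe mode is a request/ack handshake with
 * the RLC. The driver writes CMD together with MESSAGE == 1 into
 * RLC_SAFE_MODE, and the RLC firmware clears the CMD bit once the GPU
 * is quiesced, hence the bounded poll above, roughly:
 *
 *   write RLC_SAFE_MODE = CMD | (1 << MESSAGE_SHIFT);
 *   while (the CMD bit of RLC_SAFE_MODE is still set)
 *           udelay(1);
 *
 * Leaving safe mode (gfx_v9_0_unset_safe_mode() below) writes CMD with
 * MESSAGE == 0 and does not need to wait.
 */
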
4913 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4914 {
4915 	uint32_t data;
4916 
4917 	data = RLC_SAFE_MODE__CMD_MASK;
4918 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4919 }
4920 
4921 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4922 						bool enable)
4923 {
4924 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4925 
4926 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4927 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4928 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4929 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4930 	} else {
4931 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4932 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4933 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4934 	}
4935 
4936 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4937 }
4938 
4939 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4940 						bool enable)
4941 {
4942 	/* TODO: double check if we need to perform this under safe mode */
4943 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
4944 
4945 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4946 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4947 	else
4948 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4949 
4950 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4951 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4952 	else
4953 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4954 
4955 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
4956 }
4957 
4958 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4959 						      bool enable)
4960 {
4961 	uint32_t data, def;
4962 
4963 	/* It is disabled by HW by default */
4964 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4965 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4966 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4967 
4968 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
4969 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4970 
4971 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4972 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4973 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4974 
4975 		/* only for Vega10 & Raven1 */
4976 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4977 
4978 		if (def != data)
4979 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4980 
4981 		/* MGLS is a global flag to control all MGLS in GFX */
4982 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4983 			/* 2 - RLC memory Light sleep */
4984 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4985 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4986 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4987 				if (def != data)
4988 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4989 			}
4990 			/* 3 - CP memory Light sleep */
4991 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4992 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4993 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4994 				if (def != data)
4995 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4996 			}
4997 		}
4998 	} else {
4999 		/* 1 - MGCG_OVERRIDE */
5000 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5001 
5002 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
5003 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
5004 
5005 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5006 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5007 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
5008 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
5009 
5010 		if (def != data)
5011 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5012 
5013 		/* 2 - disable MGLS in RLC */
5014 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
5015 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5016 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5017 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
5018 		}
5019 
5020 		/* 3 - disable MGLS in CP */
5021 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
5022 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5023 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5024 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
5025 		}
5026 	}
5027 }
5028 
5029 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
5030 					   bool enable)
5031 {
5032 	uint32_t data, def;
5033 
5034 	if (!adev->gfx.num_gfx_rings)
5035 		return;
5036 
5037 	/* Enable 3D CGCG/CGLS */
5038 	if (enable) {
5039 		/* write cmd to clear cgcg/cgls ov */
5040 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5041 		/* unset 3D CGCG override */
5042 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5043 		/* update CGCG and CGLS override bits */
5044 		if (def != data)
5045 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5046 
5047 		/* enable 3Dcgcg FSM(0x0000363f) */
5048 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5049 
5050 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5051 			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5052 				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5053 		else
5054 			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
5055 
5056 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5057 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5058 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5059 		if (def != data)
5060 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5061 
5062 		/* set IDLE_POLL_COUNT(0x00900100) */
5063 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5064 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5065 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5066 		if (def != data)
5067 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5068 	} else {
5069 		/* Disable CGCG/CGLS */
5070 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5071 		/* disable cgcg, cgls should be disabled */
5072 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
5073 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
5074 		/* disable cgcg and cgls in FSM */
5075 		if (def != data)
5076 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5077 	}
5078 }
5079 
5080 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5081 						      bool enable)
5082 {
5083 	uint32_t def, data;
5084 
5085 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5086 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5087 		/* unset CGCG override */
5088 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5089 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5090 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5091 		else
5092 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5093 		/* update CGCG and CGLS override bits */
5094 		if (def != data)
5095 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5096 
5097 		/* enable cgcg FSM(0x0000363F) */
5098 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5099 
5100 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
5101 			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5102 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5103 		else
5104 			data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5105 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5106 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5107 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5108 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5109 		if (def != data)
5110 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5111 
5112 		/* set IDLE_POLL_COUNT(0x00900100) */
5113 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5114 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5115 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5116 		if (def != data)
5117 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5118 	} else {
5119 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5120 		/* reset CGCG/CGLS bits */
5121 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5122 		/* disable cgcg and cgls in FSM */
5123 		if (def != data)
5124 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5125 	}
5126 }
5127 
5128 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5129 					    bool enable)
5130 {
5131 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5132 	if (enable) {
5133 		/* CGCG/CGLS should be enabled after MGCG/MGLS
5134 		 * ===  MGCG + MGLS ===
5135 		 */
5136 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5137 		/* ===  CGCG /CGLS for GFX 3D Only === */
5138 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5139 		/* ===  CGCG + CGLS === */
5140 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5141 	} else {
5142 		/* CGCG/CGLS should be disabled before MGCG/MGLS
5143 		 * ===  CGCG + CGLS ===
5144 		 */
5145 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5146 		/* ===  CGCG /CGLS for GFX 3D Only === */
5147 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5148 		/* ===  MGCG + MGLS === */
5149 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5150 	}
5151 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5152 	return 0;
5153 }
5154 
5155 static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
5156 					      unsigned int vmid)
5157 {
5158 	u32 reg, data;
5159 
5160 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5161 	if (amdgpu_sriov_is_pp_one_vf(adev))
5162 		data = RREG32_NO_KIQ(reg);
5163 	else
5164 		data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
5165 
5166 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5167 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5168 
5169 	if (amdgpu_sriov_is_pp_one_vf(adev))
5170 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5171 	else
5172 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5173 }
5174 
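/*
 * Editor's note: only the VMID field of RLC_SPM_MC_CNTL is changed by
 * the read-modify-write above; everything else is preserved:
 *
 *   data = (data & ~VMID_MASK) | ((vmid & VMID_MASK) << VMID_SHIFT);
 *
 * (VMID_MASK/VMID_SHIFT stand in for the RLC_SPM_MC_CNTL__RLC_SPM_VMID
 * mask/shift pair.) The NO_KIQ accessors are used in SR-IOV
 * "pp one VF" mode to avoid routing the access through the KIQ; bare
 * metal takes the ordinary RREG32/WREG32_SOC15 path.
 */
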
5175 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
5176 		struct amdgpu_ring *ring, unsigned int vmid)
5177 {
5178 	amdgpu_gfx_off_ctrl(adev, false);
5179 
5180 	gfx_v9_0_update_spm_vmid_internal(adev, vmid);
5181 
5182 	amdgpu_gfx_off_ctrl(adev, true);
5183 }
5184 
5185 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5186 					uint32_t offset,
5187 					struct soc15_reg_rlcg *entries, int arr_size)
5188 {
5189 	int i;
5190 	uint32_t reg;
5191 
5192 	if (!entries)
5193 		return false;
5194 
5195 	for (i = 0; i < arr_size; i++) {
5196 		const struct soc15_reg_rlcg *entry;
5197 
5198 		entry = &entries[i];
5199 		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5200 		if (offset == reg)
5201 			return true;
5202 	}
5203 
5204 	return false;
5205 }
5206 
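/*
 * Usage sketch (editor's illustration): gfx_v9_0_is_rlcg_access_range()
 * below feeds this helper with the rlcg_access_gc_9_0 table. Each
 * entry's absolute offset is reconstructed from the per-IP segment
 * bases, e.g.
 *
 *   reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment]
 *         + entry->reg;
 *
 * and compared against the offset being accessed - a simple linear
 * scan over a small table.
 */
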
5207 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5208 {
5209 	return gfx_v9_0_check_rlcg_range(adev, offset,
5210 					(void *)rlcg_access_gc_9_0,
5211 					ARRAY_SIZE(rlcg_access_gc_9_0));
5212 }
5213 
5214 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5215 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5216 	.set_safe_mode = gfx_v9_0_set_safe_mode,
5217 	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
5218 	.init = gfx_v9_0_rlc_init,
5219 	.get_csb_size = gfx_v9_0_get_csb_size,
5220 	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
5221 	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5222 	.resume = gfx_v9_0_rlc_resume,
5223 	.stop = gfx_v9_0_rlc_stop,
5224 	.reset = gfx_v9_0_rlc_reset,
5225 	.start = gfx_v9_0_rlc_start,
5226 	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
5227 	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5228 };
5229 
5230 static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5231 					  enum amd_powergating_state state)
5232 {
5233 	struct amdgpu_device *adev = ip_block->adev;
5234 	bool enable = (state == AMD_PG_STATE_GATE);
5235 
5236 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5237 	case IP_VERSION(9, 2, 2):
5238 	case IP_VERSION(9, 1, 0):
5239 	case IP_VERSION(9, 3, 0):
5240 		if (!enable)
5241 			amdgpu_gfx_off_ctrl_immediate(adev, false);
5242 
5243 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5244 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5245 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5246 		} else {
5247 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5248 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5249 		}
5250 
5251 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5252 			gfx_v9_0_enable_cp_power_gating(adev, true);
5253 		else
5254 			gfx_v9_0_enable_cp_power_gating(adev, false);
5255 
5256 		/* update gfx cgpg state */
5257 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5258 
5259 		/* update mgcg state */
5260 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5261 
5262 		if (enable)
5263 			amdgpu_gfx_off_ctrl_immediate(adev, true);
5264 		break;
5265 	case IP_VERSION(9, 2, 1):
5266 		amdgpu_gfx_off_ctrl_immediate(adev, enable);
5267 		break;
5268 	default:
5269 		break;
5270 	}
5271 
5272 	return 0;
5273 }
5274 
5275 static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5276 					  enum amd_clockgating_state state)
5277 {
5278 	struct amdgpu_device *adev = ip_block->adev;
5279 
5280 	if (amdgpu_sriov_vf(adev))
5281 		return 0;
5282 
5283 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5284 	case IP_VERSION(9, 0, 1):
5285 	case IP_VERSION(9, 2, 1):
5286 	case IP_VERSION(9, 4, 0):
5287 	case IP_VERSION(9, 2, 2):
5288 	case IP_VERSION(9, 1, 0):
5289 	case IP_VERSION(9, 4, 1):
5290 	case IP_VERSION(9, 3, 0):
5291 	case IP_VERSION(9, 4, 2):
5292 		gfx_v9_0_update_gfx_clock_gating(adev,
5293 						 state == AMD_CG_STATE_GATE);
5294 		break;
5295 	default:
5296 		break;
5297 	}
5298 	return 0;
5299 }
5300 
5301 static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5302 {
5303 	struct amdgpu_device *adev = ip_block->adev;
5304 	int data;
5305 
5306 	if (amdgpu_sriov_vf(adev))
5307 		*flags = 0;
5308 
5309 	/* AMD_CG_SUPPORT_GFX_MGCG */
5310 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5311 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5312 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5313 
5314 	/* AMD_CG_SUPPORT_GFX_CGCG */
5315 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5316 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5317 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5318 
5319 	/* AMD_CG_SUPPORT_GFX_CGLS */
5320 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5321 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5322 
5323 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5324 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5325 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5326 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5327 
5328 	/* AMD_CG_SUPPORT_GFX_CP_LS */
5329 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5330 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5331 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5332 
5333 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {
5334 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5335 		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5336 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5337 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5338 
5339 		/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5340 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5341 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5342 	}
5343 }
5344 
5345 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5346 {
5347 	return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr */
5348 }
5349 
5350 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5351 {
5352 	struct amdgpu_device *adev = ring->adev;
5353 	u64 wptr;
5354 
5355 	/* XXX check if swapping is necessary on BE */
5356 	if (ring->use_doorbell) {
5357 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5358 	} else {
5359 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5360 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5361 	}
5362 
5363 	return wptr;
5364 }
5365 
5366 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5367 {
5368 	struct amdgpu_device *adev = ring->adev;
5369 
5370 	if (ring->use_doorbell) {
5371 		/* XXX check if swapping is necessary on BE */
5372 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5373 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5374 	} else {
5375 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5376 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5377 	}
5378 }
5379 
5380 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5381 {
5382 	struct amdgpu_device *adev = ring->adev;
5383 	u32 ref_and_mask, reg_mem_engine;
5384 
5385 	if (!adev->gfx.funcs->get_hdp_flush_mask) {
5386 		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
5387 		return;
5388 	}
5389 
5390 	adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
5391 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5392 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5393 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5394 			      ref_and_mask, ref_and_mask, 0x20);
5395 }
5396 
5397 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5398 					struct amdgpu_job *job,
5399 					struct amdgpu_ib *ib,
5400 					uint32_t flags)
5401 {
5402 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5403 	u32 header, control = 0;
5404 
5405 	if (ib->flags & AMDGPU_IB_FLAG_CE)
5406 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5407 	else
5408 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5409 
5410 	control |= ib->length_dw | (vmid << 24);
5411 
5412 	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
5413 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5414 
5415 		if (flags & AMDGPU_IB_PREEMPTED)
5416 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5417 
5418 		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5419 			gfx_v9_0_ring_emit_de_meta(ring,
5420 						   (!amdgpu_sriov_vf(ring->adev) &&
5421 						   flags & AMDGPU_IB_PREEMPTED) ?
5422 						   true : false,
5423 						   job->gds_size > 0 && job->gds_base != 0);
5424 	}
5425 
5426 	amdgpu_ring_write(ring, header);
5427 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5428 	amdgpu_ring_write(ring,
5429 #ifdef __BIG_ENDIAN
5430 		(2 << 0) |
5431 #endif
5432 		lower_32_bits(ib->gpu_addr));
5433 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5434 	amdgpu_ring_ib_on_emit_cntl(ring);
5435 	amdgpu_ring_write(ring, control);
5436 }
5437 
5438 static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
5439 				     unsigned offset)
5440 {
5441 	u32 control = ring->ring[offset];
5442 
5443 	control |= INDIRECT_BUFFER_PRE_RESUME(1);
5444 	ring->ring[offset] = control;
5445 }
5446 
5447 static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
5448 					unsigned offset)
5449 {
5450 	struct amdgpu_device *adev = ring->adev;
5451 	void *ce_payload_cpu_addr;
5452 	uint64_t payload_offset, payload_size;
5453 
5454 	payload_size = sizeof(struct v9_ce_ib_state);
5455 
5456 	payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5457 	ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5458 
5459 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5460 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
5461 	} else {
5462 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
5463 		       (ring->buf_mask + 1 - offset) << 2);
5464 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5465 		memcpy((void *)&ring->ring[0],
5466 		       ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5467 		       payload_size);
5468 	}
5469 }
5470 
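/*
 * Worked example of the wrap handling above (editor's illustration):
 * with a 1024-dword ring (buf_mask == 1023), a 64-dword payload and
 * offset == 1000, only 24 dwords fit before the end of the ring:
 *
 *   first memcpy : dwords 1000..1023, (buf_mask + 1 - offset) << 2 bytes
 *   second memcpy: the remaining 40 dwords, starting at ring[0]
 *
 * gfx_v9_0_ring_patch_de_meta() below applies the same split to the DE
 * payload.
 */
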
5471 static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
5472 					unsigned offset)
5473 {
5474 	struct amdgpu_device *adev = ring->adev;
5475 	void *de_payload_cpu_addr;
5476 	uint64_t payload_offset, payload_size;
5477 
5478 	payload_size = sizeof(struct v9_de_ib_state);
5479 
5480 	payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
5481 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5482 
5483 	((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
5484 		IB_COMPLETION_STATUS_PREEMPTED;
5485 
5486 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5487 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
5488 	} else {
5489 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
5490 		       (ring->buf_mask + 1 - offset) << 2);
5491 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5492 		memcpy((void *)&ring->ring[0],
5493 		       de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5494 		       payload_size);
5495 	}
5496 }
5497 
5498 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5499 					  struct amdgpu_job *job,
5500 					  struct amdgpu_ib *ib,
5501 					  uint32_t flags)
5502 {
5503 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5504 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5505 
5506 	/* Currently there is a high likelihood of a wave ID mismatch
5507 	 * between ME and GDS, leading to a HW deadlock, because ME generates
5508 	 * different wave IDs than the GDS expects. This situation happens
5509 	 * randomly when at least 5 compute pipes use GDS ordered append.
5510 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5511 	 * Those are probably bugs somewhere else in the kernel driver.
5512 	 *
5513 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5514 	 * GDS to 0 for this ring (me/pipe).
5515 	 */
5516 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5517 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5518 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5519 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5520 	}
5521 
5522 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5523 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5524 	amdgpu_ring_write(ring,
5525 #ifdef __BIG_ENDIAN
5526 				(2 << 0) |
5527 #endif
5528 				lower_32_bits(ib->gpu_addr));
5529 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5530 	amdgpu_ring_write(ring, control);
5531 }
5532 
5533 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5534 				     u64 seq, unsigned flags)
5535 {
5536 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5537 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5538 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5539 	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
5540 	uint32_t dw2 = 0;
5541 
5542 	/* RELEASE_MEM - flush caches, send int */
5543 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5544 
5545 	if (writeback) {
5546 		dw2 = EOP_TC_NC_ACTION_EN;
5547 	} else {
5548 		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
5549 				EOP_TC_MD_ACTION_EN;
5550 	}
5551 	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5552 				EVENT_INDEX(5);
5553 	if (exec)
5554 		dw2 |= EOP_EXEC;
5555 
5556 	amdgpu_ring_write(ring, dw2);
5557 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5558 
5559 	/*
5560 	 * The address must be qword aligned for a 64-bit write, and dword
5561 	 * aligned when only the low 32 bits are sent (data high discarded).
5562 	 */
5563 	if (write64bit)
5564 		BUG_ON(addr & 0x7);
5565 	else
5566 		BUG_ON(addr & 0x3);
5567 	amdgpu_ring_write(ring, lower_32_bits(addr));
5568 	amdgpu_ring_write(ring, upper_32_bits(addr));
5569 	amdgpu_ring_write(ring, lower_32_bits(seq));
5570 	amdgpu_ring_write(ring, upper_32_bits(seq));
5571 	amdgpu_ring_write(ring, 0);
5572 }
5573 
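/*
 * Editor's note on the RELEASE_MEM packet above: DATA_SEL chooses what
 * the CP writes at "addr" (2 = the 64-bit seq value, 1 = the low 32
 * bits only) and INT_SEL(2) additionally raises an end-of-pipe
 * interrupt. That is why the alignment asserts differ: a 64-bit write
 * needs a qword-aligned address, a 32-bit one only dword alignment.
 * A typical call (normally reached via the fence driver) would look
 * like:
 *
 *   gfx_v9_0_ring_emit_fence(ring, addr, seq,
 *                            AMDGPU_FENCE_FLAG_64BIT | AMDGPU_FENCE_FLAG_INT);
 */
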
5574 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5575 {
5576 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5577 	uint32_t seq = ring->fence_drv.sync_seq;
5578 	uint64_t addr = ring->fence_drv.gpu_addr;
5579 
5580 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5581 			      lower_32_bits(addr), upper_32_bits(addr),
5582 			      seq, 0xffffffff, 4);
5583 }
5584 
5585 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5586 					unsigned vmid, uint64_t pd_addr)
5587 {
5588 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5589 
5590 	/* compute doesn't have PFP */
5591 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5592 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5593 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5594 		amdgpu_ring_write(ring, 0x0);
5595 	}
5596 }
5597 
5598 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5599 {
5600 	return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
5601 }
5602 
5603 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5604 {
5605 	u64 wptr;
5606 
5607 	/* XXX check if swapping is necessary on BE */
5608 	if (ring->use_doorbell)
5609 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5610 	else
5611 		BUG();
5612 	return wptr;
5613 }
5614 
5615 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5616 {
5617 	struct amdgpu_device *adev = ring->adev;
5618 
5619 	/* XXX check if swapping is necessary on BE */
5620 	if (ring->use_doorbell) {
5621 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5622 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5623 	} else {
5624 		BUG(); /* only DOORBELL method supported on gfx9 now */
5625 	}
5626 }
5627 
5628 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5629 					 u64 seq, unsigned int flags)
5630 {
5631 	struct amdgpu_device *adev = ring->adev;
5632 
5633 	/* we only allocate 32 bits for each seq wb address */
5634 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5635 
5636 	/* write fence seq to the "addr" */
5637 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5638 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5639 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5640 	amdgpu_ring_write(ring, lower_32_bits(addr));
5641 	amdgpu_ring_write(ring, upper_32_bits(addr));
5642 	amdgpu_ring_write(ring, lower_32_bits(seq));
5643 
5644 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5645 		/* set register to trigger INT */
5646 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5647 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5648 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5649 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5650 		amdgpu_ring_write(ring, 0);
5651 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5652 	}
5653 }
5654 
5655 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5656 {
5657 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5658 	amdgpu_ring_write(ring, 0);
5659 }
5660 
5661 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
5662 {
5663 	struct amdgpu_device *adev = ring->adev;
5664 	struct v9_ce_ib_state ce_payload = {0};
5665 	uint64_t offset, ce_payload_gpu_addr;
5666 	void *ce_payload_cpu_addr;
5667 	int cnt;
5668 
5669 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5670 
5671 	offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5672 	ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5673 	ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5674 
5675 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5676 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5677 				 WRITE_DATA_DST_SEL(8) |
5678 				 WR_CONFIRM) |
5679 				 WRITE_DATA_CACHE_POLICY(0));
5680 	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
5681 	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
5682 
5683 	amdgpu_ring_ib_on_emit_ce(ring);
5684 
5685 	if (resume)
5686 		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
5687 					   sizeof(ce_payload) >> 2);
5688 	else
5689 		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
5690 					   sizeof(ce_payload) >> 2);
5691 }
5692 
5693 static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
5694 {
5695 	int i, r = 0;
5696 	struct amdgpu_device *adev = ring->adev;
5697 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5698 	struct amdgpu_ring *kiq_ring = &kiq->ring;
5699 	unsigned long flags;
5700 
5701 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5702 		return -EINVAL;
5703 
5704 	spin_lock_irqsave(&kiq->ring_lock, flags);
5705 
5706 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5707 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
5708 		return -ENOMEM;
5709 	}
5710 
5711 	/* assert preemption condition */
5712 	amdgpu_ring_set_preempt_cond_exec(ring, false);
5713 
5714 	ring->trail_seq += 1;
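	/* reserve space for the trailing fence and the CP_VMID_PREEMPT clear
	 * emitted on this ring below
	 */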
5715 	amdgpu_ring_alloc(ring, 13);
5716 	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
5717 				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
5718 
5719 	/* assert IB preemption, emit the trailing fence */
5720 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5721 				   ring->trail_fence_gpu_addr,
5722 				   ring->trail_seq);
5723 
5724 	amdgpu_ring_commit(kiq_ring);
5725 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
5726 
5727 	/* poll the trailing fence */
5728 	for (i = 0; i < adev->usec_timeout; i++) {
5729 		if (ring->trail_seq ==
5730 			le32_to_cpu(*ring->trail_fence_cpu_addr))
5731 			break;
5732 		udelay(1);
5733 	}
5734 
5735 	if (i >= adev->usec_timeout) {
5736 		r = -EINVAL;
5737 		DRM_WARN("ring %d: timed out preempting IB\n", ring->idx);
5738 	}
5739 
5740 	/* reset CP_VMID_PREEMPT after the trailing fence */
5741 	amdgpu_ring_emit_wreg(ring,
5742 			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
5743 			      0x0);
5744 	amdgpu_ring_commit(ring);
5745 
5746 	/* deassert preemption condition */
5747 	amdgpu_ring_set_preempt_cond_exec(ring, true);
5748 	return r;
5749 }
5750 
5751 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
5752 {
5753 	struct amdgpu_device *adev = ring->adev;
5754 	struct v9_de_ib_state de_payload = {0};
5755 	uint64_t offset, gds_addr, de_payload_gpu_addr;
5756 	void *de_payload_cpu_addr;
5757 	int cnt;
5758 
5759 	offset = offsetof(struct v9_gfx_meta_data, de_payload);
5760 	de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5761 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5762 
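	/* the GDS backup area is carved out of the top of the CSA, page aligned */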
5763 	gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5764 			 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5765 			 PAGE_SIZE);
5766 
5767 	if (usegds) {
5768 		de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5769 		de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5770 	}
5771 
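	/* same WRITE_DATA dword accounting as for the CE payload above */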
5772 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5773 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5774 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5775 				 WRITE_DATA_DST_SEL(8) |
5776 				 WR_CONFIRM) |
5777 				 WRITE_DATA_CACHE_POLICY(0));
5778 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5779 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5780 
5781 	amdgpu_ring_ib_on_emit_de(ring);
5782 	if (resume)
5783 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5784 					   sizeof(de_payload) >> 2);
5785 	else
5786 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5787 					   sizeof(de_payload) >> 2);
5788 }
5789 
5790 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5791 				   bool secure)
5792 {
5793 	uint32_t v = secure ? FRAME_TMZ : 0;
5794 
5795 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5796 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5797 }
5798 
5799 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5800 {
5801 	uint32_t dw2 = 0;
5802 
5803 	gfx_v9_0_ring_emit_ce_meta(ring,
5804 				   !amdgpu_sriov_vf(ring->adev) &&
5805 				   (flags & AMDGPU_IB_PREEMPTED));
5806 
5807 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5808 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5809 		/* set load_global_config & load_global_uconfig */
5810 		dw2 |= 0x8001;
5811 		/* set load_cs_sh_regs */
5812 		dw2 |= 0x01000000;
5813 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5814 		dw2 |= 0x10002;
5815 
5816 		/* set load_ce_ram if preamble presented */
5817 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5818 			dw2 |= 0x10000000;
5819 	} else {
5820 		/* still load_ce_ram if this is the first time the preamble is
5821 		 * presented, even though no context switch happens.
5822 		 */
5823 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5824 			dw2 |= 0x10000000;
5825 	}
5826 
5827 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5828 	amdgpu_ring_write(ring, dw2);
5829 	amdgpu_ring_write(ring, 0);
5830 }
5831 
5832 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
5833 						  uint64_t addr)
5834 {
5835 	unsigned ret;
5836 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5837 	amdgpu_ring_write(ring, lower_32_bits(addr));
5838 	amdgpu_ring_write(ring, upper_32_bits(addr));
5839 	/* discard the following DWs if *cond_exec_gpu_addr == 0 */
5840 	amdgpu_ring_write(ring, 0);
5841 	ret = ring->wptr & ring->buf_mask;
5842 	/* patch dummy value later */
5843 	amdgpu_ring_write(ring, 0);
5844 	return ret;
5845 }
5846 
5847 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5848 				    uint32_t reg_val_offs)
5849 {
5850 	struct amdgpu_device *adev = ring->adev;
5851 
5852 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5853 	amdgpu_ring_write(ring, 0 |	/* src: register*/
5854 				(5 << 8) |	/* dst: memory */
5855 				(1 << 20));	/* write confirm */
5856 	amdgpu_ring_write(ring, reg);
5857 	amdgpu_ring_write(ring, 0);
5858 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5859 				reg_val_offs * 4));
5860 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5861 				reg_val_offs * 4));
5862 }
5863 
5864 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5865 				    uint32_t val)
5866 {
5867 	uint32_t cmd = 0;
5868 
5869 	switch (ring->funcs->type) {
5870 	case AMDGPU_RING_TYPE_GFX:
5871 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5872 		break;
5873 	case AMDGPU_RING_TYPE_KIQ:
5874 		cmd = (1 << 16); /* no inc addr */
5875 		break;
5876 	default:
5877 		cmd = WR_CONFIRM;
5878 		break;
5879 	}
5880 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5881 	amdgpu_ring_write(ring, cmd);
5882 	amdgpu_ring_write(ring, reg);
5883 	amdgpu_ring_write(ring, 0);
5884 	amdgpu_ring_write(ring, val);
5885 }
5886 
5887 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5888 					uint32_t val, uint32_t mask)
5889 {
5890 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5891 }
5892 
5893 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5894 						  uint32_t reg0, uint32_t reg1,
5895 						  uint32_t ref, uint32_t mask)
5896 {
5897 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5898 	struct amdgpu_device *adev = ring->adev;
5899 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5900 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5901 
5902 	if (fw_version_ok)
5903 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5904 				      ref, mask, 0x20);
5905 	else
5906 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5907 							   ref, mask);
5908 }
5909 
5910 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5911 {
5912 	struct amdgpu_device *adev = ring->adev;
5913 	uint32_t value = 0;
5914 
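	/* build an SQ command targeting only waves whose VMID matches the hung
	 * ring (CMD 0x3 with broadcast mode is understood to kill those waves)
	 */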
5915 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5916 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5917 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5918 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5919 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5920 	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5921 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5922 }
5923 
5924 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5925 						 enum amdgpu_interrupt_state state)
5926 {
5927 	switch (state) {
5928 	case AMDGPU_IRQ_STATE_DISABLE:
5929 	case AMDGPU_IRQ_STATE_ENABLE:
5930 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5931 			       TIME_STAMP_INT_ENABLE,
5932 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5933 		break;
5934 	default:
5935 		break;
5936 	}
5937 }
5938 
5939 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5940 						     int me, int pipe,
5941 						     enum amdgpu_interrupt_state state)
5942 {
5943 	u32 mec_int_cntl, mec_int_cntl_reg;
5944 
5945 	/*
5946 	 * amdgpu controls only the first MEC. That's why this function only
5947 	 * handles the setting of interrupts for this specific MEC. All other
5948 	 * pipes' interrupts are set by amdkfd.
5949 	 */
5950 
5951 	if (me == 1) {
5952 		switch (pipe) {
5953 		case 0:
5954 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5955 			break;
5956 		case 1:
5957 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5958 			break;
5959 		case 2:
5960 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5961 			break;
5962 		case 3:
5963 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5964 			break;
5965 		default:
5966 			DRM_DEBUG("invalid pipe %d\n", pipe);
5967 			return;
5968 		}
5969 	} else {
5970 		DRM_DEBUG("invalid me %d\n", me);
5971 		return;
5972 	}
5973 
5974 	switch (state) {
5975 	case AMDGPU_IRQ_STATE_DISABLE:
5976 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5977 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5978 					     TIME_STAMP_INT_ENABLE, 0);
5979 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5980 		break;
5981 	case AMDGPU_IRQ_STATE_ENABLE:
5982 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5983 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5984 					     TIME_STAMP_INT_ENABLE, 1);
5985 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5986 		break;
5987 	default:
5988 		break;
5989 	}
5990 }
5991 
5992 static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
5993 				     int me, int pipe)
5994 {
5995 	/*
5996 	 * amdgpu controls only the first MEC. That's why this function only
5997 	 * handles the setting of interrupts for this specific MEC. All other
5998 	 * pipes' interrupts are set by amdkfd.
5999 	 */
6000 	if (me != 1)
6001 		return 0;
6002 
6003 	switch (pipe) {
6004 	case 0:
6005 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
6006 	case 1:
6007 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
6008 	case 2:
6009 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
6010 	case 3:
6011 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
6012 	default:
6013 		return 0;
6014 	}
6015 }
6016 
6017 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6018 					     struct amdgpu_irq_src *source,
6019 					     unsigned type,
6020 					     enum amdgpu_interrupt_state state)
6021 {
6022 	u32 cp_int_cntl_reg, cp_int_cntl;
6023 	int i, j;
6024 
6025 	switch (state) {
6026 	case AMDGPU_IRQ_STATE_DISABLE:
6027 	case AMDGPU_IRQ_STATE_ENABLE:
6028 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6029 			       PRIV_REG_INT_ENABLE,
6030 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6031 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6032 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6033 				/* MECs start at 1 */
6034 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6035 
6036 				if (cp_int_cntl_reg) {
6037 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6038 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6039 								    PRIV_REG_INT_ENABLE,
6040 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6041 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6042 				}
6043 			}
6044 		}
6045 		break;
6046 	default:
6047 		break;
6048 	}
6049 
6050 	return 0;
6051 }
6052 
6053 static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6054 					   struct amdgpu_irq_src *source,
6055 					   unsigned type,
6056 					   enum amdgpu_interrupt_state state)
6057 {
6058 	u32 cp_int_cntl_reg, cp_int_cntl;
6059 	int i, j;
6060 
6061 	switch (state) {
6062 	case AMDGPU_IRQ_STATE_DISABLE:
6063 	case AMDGPU_IRQ_STATE_ENABLE:
6064 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6065 			       OPCODE_ERROR_INT_ENABLE,
6066 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6067 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6068 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6069 				/* MECs start at 1 */
6070 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6071 
6072 				if (cp_int_cntl_reg) {
6073 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6074 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6075 								    OPCODE_ERROR_INT_ENABLE,
6076 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6077 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6078 				}
6079 			}
6080 		}
6081 		break;
6082 	default:
6083 		break;
6084 	}
6085 
6086 	return 0;
6087 }
6088 
6089 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6090 					      struct amdgpu_irq_src *source,
6091 					      unsigned type,
6092 					      enum amdgpu_interrupt_state state)
6093 {
6094 	switch (state) {
6095 	case AMDGPU_IRQ_STATE_DISABLE:
6096 	case AMDGPU_IRQ_STATE_ENABLE:
6097 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6098 			       PRIV_INSTR_INT_ENABLE,
6099 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6100 		break;
6101 	default:
6102 		break;
6103 	}
6104 
6105 	return 0;
6106 }
6107 
6108 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
6109 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6110 			CP_ECC_ERROR_INT_ENABLE, 1)
6111 
6112 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
6113 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6114 			CP_ECC_ERROR_INT_ENABLE, 0)
6115 
6116 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
6117 					      struct amdgpu_irq_src *source,
6118 					      unsigned type,
6119 					      enum amdgpu_interrupt_state state)
6120 {
6121 	switch (state) {
6122 	case AMDGPU_IRQ_STATE_DISABLE:
6123 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6124 				CP_ECC_ERROR_INT_ENABLE, 0);
6125 		DISABLE_ECC_ON_ME_PIPE(1, 0);
6126 		DISABLE_ECC_ON_ME_PIPE(1, 1);
6127 		DISABLE_ECC_ON_ME_PIPE(1, 2);
6128 		DISABLE_ECC_ON_ME_PIPE(1, 3);
6129 		break;
6130 
6131 	case AMDGPU_IRQ_STATE_ENABLE:
6132 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6133 				CP_ECC_ERROR_INT_ENABLE, 1);
6134 		ENABLE_ECC_ON_ME_PIPE(1, 0);
6135 		ENABLE_ECC_ON_ME_PIPE(1, 1);
6136 		ENABLE_ECC_ON_ME_PIPE(1, 2);
6137 		ENABLE_ECC_ON_ME_PIPE(1, 3);
6138 		break;
6139 	default:
6140 		break;
6141 	}
6142 
6143 	return 0;
6144 }
6145
6147 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6148 					    struct amdgpu_irq_src *src,
6149 					    unsigned type,
6150 					    enum amdgpu_interrupt_state state)
6151 {
6152 	switch (type) {
6153 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6154 		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
6155 		break;
6156 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6157 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6158 		break;
6159 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6160 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6161 		break;
6162 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6163 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6164 		break;
6165 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6166 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6167 		break;
6168 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6169 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6170 		break;
6171 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6172 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6173 		break;
6174 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6175 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6176 		break;
6177 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6178 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6179 		break;
6180 	default:
6181 		break;
6182 	}
6183 	return 0;
6184 }
6185 
6186 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
6187 			    struct amdgpu_irq_src *source,
6188 			    struct amdgpu_iv_entry *entry)
6189 {
6190 	int i;
6191 	u8 me_id, pipe_id, queue_id;
6192 	struct amdgpu_ring *ring;
6193 
6194 	DRM_DEBUG("IH: CP EOP\n");
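	/* ring_id encodes the queue in bits [6:4], the ME in [3:2] and the pipe in [1:0] */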
6195 	me_id = (entry->ring_id & 0x0c) >> 2;
6196 	pipe_id = (entry->ring_id & 0x03) >> 0;
6197 	queue_id = (entry->ring_id & 0x70) >> 4;
6198 
6199 	switch (me_id) {
6200 	case 0:
6201 		if (adev->gfx.num_gfx_rings) {
6202 			if (!adev->gfx.mcbp) {
6203 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6204 			} else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
6205 				/* Fence signals are handled on the software rings */
6206 				for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
6207 					amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
6208 			}
6209 		}
6210 		break;
6211 	case 1:
6212 	case 2:
6213 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6214 			ring = &adev->gfx.compute_ring[i];
6215 			/* Per-queue interrupts are supported for MEC starting from VI.
6216 			 * The interrupt can only be enabled/disabled per pipe, not per queue.
6217 			 */
6218 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6219 				amdgpu_fence_process(ring);
6220 		}
6221 		break;
6222 	}
6223 	return 0;
6224 }
6225 
6226 static void gfx_v9_0_fault(struct amdgpu_device *adev,
6227 			   struct amdgpu_iv_entry *entry)
6228 {
6229 	u8 me_id, pipe_id, queue_id;
6230 	struct amdgpu_ring *ring;
6231 	int i;
6232 
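	/* same ring_id decode as in gfx_v9_0_eop_irq() */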
6233 	me_id = (entry->ring_id & 0x0c) >> 2;
6234 	pipe_id = (entry->ring_id & 0x03) >> 0;
6235 	queue_id = (entry->ring_id & 0x70) >> 4;
6236 
6237 	switch (me_id) {
6238 	case 0:
6239 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6240 		break;
6241 	case 1:
6242 	case 2:
6243 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6244 			ring = &adev->gfx.compute_ring[i];
6245 			if (ring->me == me_id && ring->pipe == pipe_id &&
6246 			    ring->queue == queue_id)
6247 				drm_sched_fault(&ring->sched);
6248 		}
6249 		break;
6250 	}
6251 }
6252 
6253 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
6254 				 struct amdgpu_irq_src *source,
6255 				 struct amdgpu_iv_entry *entry)
6256 {
6257 	DRM_ERROR("Illegal register access in command stream\n");
6258 	gfx_v9_0_fault(adev, entry);
6259 	return 0;
6260 }
6261 
6262 static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
6263 			       struct amdgpu_irq_src *source,
6264 			       struct amdgpu_iv_entry *entry)
6265 {
6266 	DRM_ERROR("Illegal opcode in command stream\n");
6267 	gfx_v9_0_fault(adev, entry);
6268 	return 0;
6269 }
6270 
6271 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
6272 				  struct amdgpu_irq_src *source,
6273 				  struct amdgpu_iv_entry *entry)
6274 {
6275 	DRM_ERROR("Illegal instruction in command stream\n");
6276 	gfx_v9_0_fault(adev, entry);
6277 	return 0;
6278 }
6279 
6280 
6281 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
6282 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
6283 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
6284 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
6285 	},
6286 	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
6287 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
6288 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
6289 	},
6290 	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6291 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
6292 	  0, 0
6293 	},
6294 	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6295 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
6296 	  0, 0
6297 	},
6298 	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
6299 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
6300 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
6301 	},
6302 	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6303 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
6304 	  0, 0
6305 	},
6306 	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6307 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
6308 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
6309 	},
6310 	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
6311 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
6312 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
6313 	},
6314 	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
6315 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
6316 	  0, 0
6317 	},
6318 	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
6319 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
6320 	  0, 0
6321 	},
6322 	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
6323 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
6324 	  0, 0
6325 	},
6326 	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6327 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
6328 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
6329 	},
6330 	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6331 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
6332 	  0, 0
6333 	},
6334 	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6335 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6336 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6337 	},
6338 	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6339 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6340 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6341 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6342 	},
6343 	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6344 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6345 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6346 	  0, 0
6347 	},
6348 	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6349 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6350 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6351 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6352 	},
6353 	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6354 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6355 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6356 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6357 	},
6358 	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6359 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6360 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6361 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6362 	},
6363 	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6364 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6365 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6366 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6367 	},
6368 	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6369 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6370 	  0, 0
6371 	},
6372 	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6373 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6374 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6375 	},
6376 	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6377 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6378 	  0, 0
6379 	},
6380 	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6381 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6382 	  0, 0
6383 	},
6384 	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6385 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6386 	  0, 0
6387 	},
6388 	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6389 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6390 	  0, 0
6391 	},
6392 	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6393 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6394 	  0, 0
6395 	},
6396 	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6397 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6398 	  0, 0
6399 	},
6400 	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6401 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6402 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6403 	},
6404 	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6405 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6406 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6407 	},
6408 	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6409 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6410 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6411 	},
6412 	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6413 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6414 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6415 	},
6416 	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6417 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6418 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6419 	},
6420 	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6421 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6422 	  0, 0
6423 	},
6424 	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6425 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6426 	  0, 0
6427 	},
6428 	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6429 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6430 	  0, 0
6431 	},
6432 	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6433 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6434 	  0, 0
6435 	},
6436 	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6437 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6438 	  0, 0
6439 	},
6440 	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6441 	  SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6442 	  0, 0
6443 	},
6444 	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6445 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6446 	  0, 0
6447 	},
6448 	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6449 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6450 	  0, 0
6451 	},
6452 	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6453 	  SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6454 	  0, 0
6455 	},
6456 	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6457 	  SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6458 	  0, 0
6459 	},
6460 	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6461 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6462 	  0, 0
6463 	},
6464 	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6465 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6466 	  0, 0
6467 	},
6468 	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6469 	  SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6470 	  0, 0
6471 	},
6472 	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6473 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6474 	  0, 0
6475 	},
6476 	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6477 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6478 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6479 	},
6480 	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6481 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6482 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6483 	},
6484 	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6485 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6486 	  0, 0
6487 	},
6488 	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6489 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6490 	  0, 0
6491 	},
6492 	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6493 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6494 	  0, 0
6495 	},
6496 	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6497 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6498 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6499 	},
6500 	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6501 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6502 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6503 	},
6504 	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6505 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6506 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6507 	},
6508 	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6509 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6510 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6511 	},
6512 	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6513 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6514 	  0, 0
6515 	},
6516 	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6517 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6518 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6519 	},
6520 	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6521 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6522 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6523 	},
6524 	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6525 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6526 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6527 	},
6528 	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6529 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6530 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6531 	},
6532 	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6533 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6534 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6535 	},
6536 	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6537 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6538 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6539 	},
6540 	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6541 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6542 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6543 	},
6544 	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6545 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6546 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6547 	},
6548 	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6549 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6550 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6551 	},
6552 	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6553 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6554 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6555 	},
6556 	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6557 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6558 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6559 	},
6560 	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6561 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6562 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6563 	},
6564 	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6565 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6566 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6567 	},
6568 	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6569 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6570 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6571 	},
6572 	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6573 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6574 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6575 	},
6576 	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6577 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6578 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6579 	},
6580 	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6581 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6582 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6583 	},
6584 	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6585 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6586 	  0, 0
6587 	},
6588 	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6589 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6590 	  0, 0
6591 	},
6592 	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6593 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6594 	  0, 0
6595 	},
6596 	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6597 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6598 	  0, 0
6599 	},
6600 	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6601 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6602 	  0, 0
6603 	},
6604 	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6605 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6606 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6607 	},
6608 	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6609 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6610 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6611 	},
6612 	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6613 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6614 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6615 	},
6616 	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6617 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6618 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6619 	},
6620 	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6621 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6622 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6623 	},
6624 	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6625 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6626 	  0, 0
6627 	},
6628 	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6629 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6630 	  0, 0
6631 	},
6632 	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6633 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6634 	  0, 0
6635 	},
6636 	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6637 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6638 	  0, 0
6639 	},
6640 	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6641 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6642 	  0, 0
6643 	},
6644 	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6645 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6646 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6647 	},
6648 	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6649 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6650 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6651 	},
6652 	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6653 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6654 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6655 	},
6656 	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6657 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6658 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6659 	},
6660 	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6661 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6662 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6663 	},
6664 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6665 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6666 	  0, 0
6667 	},
6668 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6669 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6670 	  0, 0
6671 	},
6672 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6673 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6674 	  0, 0
6675 	},
6676 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6677 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6678 	  0, 0
6679 	},
6680 	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6681 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6682 	  0, 0
6683 	},
6684 	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6685 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6686 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6687 	},
6688 	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6689 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6690 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6691 	},
6692 	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6693 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6694 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6695 	},
6696 	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6697 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6698 	  0, 0
6699 	},
6700 	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6701 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6702 	  0, 0
6703 	},
6704 	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6705 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6706 	  0, 0
6707 	},
6708 	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6709 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6710 	  0, 0
6711 	},
6712 	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6713 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6714 	  0, 0
6715 	},
6716 	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6717 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6718 	  0, 0
6719 	}
6720 };
6721 
6722 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6723 				     void *inject_if, uint32_t instance_mask)
6724 {
6725 	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6726 	int ret;
6727 	struct ta_ras_trigger_error_input block_info = { 0 };
6728 
6729 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6730 		return -EINVAL;
6731 
6732 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6733 		return -EINVAL;
6734 
6735 	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6736 		return -EPERM;
6737 
6738 	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6739 	      info->head.type)) {
6740 		DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6741 			ras_gfx_subblocks[info->head.sub_block_index].name,
6742 			info->head.type);
6743 		return -EPERM;
6744 	}
6745 
6746 	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6747 	      info->head.type)) {
6748 		DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6749 			ras_gfx_subblocks[info->head.sub_block_index].name,
6750 			info->head.type);
6751 		return -EPERM;
6752 	}
6753 
6754 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6755 	block_info.sub_block_index =
6756 		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6757 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6758 	block_info.address = info->address;
6759 	block_info.value = info->value;
6760 
6761 	mutex_lock(&adev->grbm_idx_mutex);
6762 	ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
6763 	mutex_unlock(&adev->grbm_idx_mutex);
6764 
6765 	return ret;
6766 }
6767 
6768 static const char * const vml2_mems[] = {
6769 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6770 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6771 	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
6772 	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
6773 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6774 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6775 	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
6776 	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
6777 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6778 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6779 	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
6780 	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
6781 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6782 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6783 	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
6784 	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
6785 };
6786 
6787 static const char * const vml2_walker_mems[] = {
6788 	"UTC_VML2_CACHE_PDE0_MEM0",
6789 	"UTC_VML2_CACHE_PDE0_MEM1",
6790 	"UTC_VML2_CACHE_PDE1_MEM0",
6791 	"UTC_VML2_CACHE_PDE1_MEM1",
6792 	"UTC_VML2_CACHE_PDE2_MEM0",
6793 	"UTC_VML2_CACHE_PDE2_MEM1",
6794 	"UTC_VML2_RDIF_LOG_FIFO",
6795 };
6796 
6797 static const char * const atc_l2_cache_2m_mems[] = {
6798 	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6799 	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6800 	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6801 	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6802 };
6803 
6804 static const char * const atc_l2_cache_4k_mems[] = {
6805 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6806 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6807 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6808 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6809 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6810 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6811 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6812 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6813 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6814 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6815 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6816 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6817 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6818 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6819 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6820 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6821 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6822 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6823 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6824 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6825 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6826 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6827 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6828 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6829 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6830 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6831 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6832 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6833 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6834 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6835 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6836 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6837 };
6838 
6839 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6840 					 struct ras_err_data *err_data)
6841 {
6842 	uint32_t i, data;
6843 	uint32_t sec_count, ded_count;
6844 
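	/* index 255 broadcasts to all memory instances; clearing the CNT
	 * registers resets the counters before they are sampled per instance
	 * below
	 */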
6845 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6846 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6847 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6848 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6849 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6850 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6851 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6852 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6853 
6854 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6855 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6856 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6857 
6858 		sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6859 		if (sec_count) {
6860 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6861 				"SEC %d\n", i, vml2_mems[i], sec_count);
6862 			err_data->ce_count += sec_count;
6863 		}
6864 
6865 		ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6866 		if (ded_count) {
6867 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6868 				"DED %d\n", i, vml2_mems[i], ded_count);
6869 			err_data->ue_count += ded_count;
6870 		}
6871 	}
6872 
6873 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6874 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6875 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6876 
6877 		sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6878 						SEC_COUNT);
6879 		if (sec_count) {
6880 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6881 				"SEC %d\n", i, vml2_walker_mems[i], sec_count);
6882 			err_data->ce_count += sec_count;
6883 		}
6884 
6885 		ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6886 						DED_COUNT);
6887 		if (ded_count) {
6888 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6889 				"DED %d\n", i, vml2_walker_mems[i], ded_count);
6890 			err_data->ue_count += ded_count;
6891 		}
6892 	}
6893 
6894 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6895 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6896 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6897 
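		/* SEC count is in bits [14:13] of ATC_L2_CACHE_2M_EDC_CNT */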
6898 		sec_count = (data & 0x00006000L) >> 0xd;
6899 		if (sec_count) {
6900 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6901 				"SEC %d\n", i, atc_l2_cache_2m_mems[i],
6902 				sec_count);
6903 			err_data->ce_count += sec_count;
6904 		}
6905 	}
6906 
6907 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6908 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6909 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6910 
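		/* SEC count is in bits [14:13], DED count in bits [16:15] */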
6911 		sec_count = (data & 0x00006000L) >> 0xd;
6912 		if (sec_count) {
6913 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6914 				"SEC %d\n", i, atc_l2_cache_4k_mems[i],
6915 				sec_count);
6916 			err_data->ce_count += sec_count;
6917 		}
6918 
6919 		ded_count = (data & 0x00018000L) >> 0xf;
6920 		if (ded_count) {
6921 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6922 				"DED %d\n", i, atc_l2_cache_4k_mems[i],
6923 				ded_count);
6924 			err_data->ue_count += ded_count;
6925 		}
6926 	}
6927 
6928 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6929 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6930 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6931 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6932 
6933 	return 0;
6934 }
6935 
6936 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6937 	const struct soc15_reg_entry *reg,
6938 	uint32_t se_id, uint32_t inst_id, uint32_t value,
6939 	uint32_t *sec_count, uint32_t *ded_count)
6940 {
6941 	uint32_t i;
6942 	uint32_t sec_cnt, ded_cnt;
6943 
6944 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6945 		if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6946 			gfx_v9_0_ras_fields[i].seg != reg->seg ||
6947 			gfx_v9_0_ras_fields[i].inst != reg->inst)
6948 			continue;
6949 
6950 		sec_cnt = (value &
6951 				gfx_v9_0_ras_fields[i].sec_count_mask) >>
6952 				gfx_v9_0_ras_fields[i].sec_count_shift;
6953 		if (sec_cnt) {
6954 			dev_info(adev->dev, "GFX SubBlock %s, "
6955 				"Instance[%d][%d], SEC %d\n",
6956 				gfx_v9_0_ras_fields[i].name,
6957 				se_id, inst_id,
6958 				sec_cnt);
6959 			*sec_count += sec_cnt;
6960 		}
6961 
6962 		ded_cnt = (value &
6963 				gfx_v9_0_ras_fields[i].ded_count_mask) >>
6964 				gfx_v9_0_ras_fields[i].ded_count_shift;
6965 		if (ded_cnt) {
6966 			dev_info(adev->dev, "GFX SubBlock %s, "
6967 				"Instance[%d][%d], DED %d\n",
6968 				gfx_v9_0_ras_fields[i].name,
6969 				se_id, inst_id,
6970 				ded_cnt);
6971 			*ded_count += ded_cnt;
6972 		}
6973 	}
6974 
6975 	return 0;
6976 }
6977 
6978 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6979 {
6980 	int i, j, k;
6981 
6982 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6983 		return;
6984 
6985 	/* read back registers to clear the counters */
6986 	mutex_lock(&adev->grbm_idx_mutex);
6987 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6988 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6989 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6990 				amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
6991 				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6992 			}
6993 		}
6994 	}
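	/* 0xe0000000 sets the SE/SH/instance broadcast bits in GRBM_GFX_INDEX */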
6995 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6996 	mutex_unlock(&adev->grbm_idx_mutex);
6997 
6998 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6999 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
7000 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7001 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
7002 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7003 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
7004 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7005 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
7006 
7007 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
7008 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
7009 		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
7010 	}
7011 
7012 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
7013 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
7014 		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
7015 	}
7016 
7017 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
7018 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
7019 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
7020 	}
7021 
7022 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
7023 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
7024 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
7025 	}
7026 
7027 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7028 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7029 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7030 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7031 }
7032 
7033 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
7034 					  void *ras_error_status)
7035 {
7036 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
7037 	uint32_t sec_count = 0, ded_count = 0;
7038 	uint32_t i, j, k;
7039 	uint32_t reg_value;
7040 
7041 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7042 		return;
7043 
7044 	err_data->ue_count = 0;
7045 	err_data->ce_count = 0;
7046 
7047 	mutex_lock(&adev->grbm_idx_mutex);
7048 
7049 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7050 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7051 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7052 				amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
7053 				reg_value =
7054 					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7055 				if (reg_value)
7056 					gfx_v9_0_ras_error_count(adev,
7057 						&gfx_v9_0_edc_counter_regs[i],
7058 						j, k, reg_value,
7059 						&sec_count, &ded_count);
7060 			}
7061 		}
7062 	}
7063 
7064 	err_data->ce_count += sec_count;
7065 	err_data->ue_count += ded_count;
7066 
7067 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7068 	mutex_unlock(&adev->grbm_idx_mutex);
7069 
7070 	gfx_v9_0_query_utc_edc_status(adev, err_data);
7071 }
7072 
7073 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
7074 {
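	/* invalidate the shader instruction and scalar caches and the TC
	 * L1/L2 caches, and write back dirty TC lines
	 */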
7075 	const unsigned int cp_coher_cntl =
7076 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
7077 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
7078 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
7079 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
7080 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
7081 
7082 	/* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
7083 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
7084 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
7085 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
7086 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
7087 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
7088 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
7089 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
7090 }
7091 
7092 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
7093 					uint32_t pipe, bool enable)
7094 {
7095 	struct amdgpu_device *adev = ring->adev;
7096 	uint32_t val;
7097 	uint32_t wcl_cs_reg;
7098 
7099 	/* the mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
7100 	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
7101 
7102 	switch (pipe) {
7103 	case 0:
7104 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
7105 		break;
7106 	case 1:
7107 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
7108 		break;
7109 	case 2:
7110 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
7111 		break;
7112 	case 3:
7113 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
7114 		break;
7115 	default:
7116 		DRM_DEBUG("invalid pipe %d\n", pipe);
7117 		return;
7118 	}
7119 
7120 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
7121 }
7122 
7123 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
7124 {
7125 	struct amdgpu_device *adev = ring->adev;
7126 	uint32_t val;
7127 	int i;
7128 
7129 	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register that limits
7130 	 * the number of gfx waves. Writing 0x1f (the low 5 bits set) ensures gfx
7131 	 * only gets around 25% of the gpu resources.
7132 	 */
7134 	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
7135 	amdgpu_ring_emit_wreg(ring,
7136 			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
7137 			      val);
7138 
7139 	/* Restrict waves for normal/low priority compute queues as well
7140 	 * to get best QoS for high priority compute jobs.
7141 	 *
7142 	 * amdgpu controls only the first ME (CS pipes 0-3).
7143 	 */
7144 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
7145 		if (i != ring->pipe)
7146 			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
7147 	}
7149 }
7150 
7151 static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
7152 {
7153 	/* Header itself is a NOP packet */
7154 	if (num_nop == 1) {
7155 		amdgpu_ring_write(ring, ring->funcs->nop);
7156 		return;
7157 	}
7158 
7159 	/* Max HW optimization is 0x3ffe NOP dwords in a single packet; the remainder is emitted one NOP at a time */
7160 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
7161 
7162 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
7163 	amdgpu_ring_insert_nop(ring, num_nop - 1);
7164 }
7165 
7166 static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
7167 			      unsigned int vmid,
7168 			      struct amdgpu_fence *timedout_fence)
7169 {
7170 	struct amdgpu_device *adev = ring->adev;
7171 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
7172 	struct amdgpu_ring *kiq_ring = &kiq->ring;
7173 	unsigned long flags;
7174 	int i, r;
7175 
7176 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
7177 		return -EINVAL;
7178 
7179 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
7180 
7181 	spin_lock_irqsave(&kiq->ring_lock, flags);
7182 
7183 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
7184 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7185 		return -ENOMEM;
7186 	}
7187 
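	/* ask the KIQ to unmap and reset the hung compute queue */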
7188 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
7189 				   0, 0);
7190 	amdgpu_ring_commit(kiq_ring);
7191 
7192 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7193 
7194 	r = amdgpu_ring_test_ring(kiq_ring);
7195 	if (r)
7196 		return r;
7197 
7198 	/* make sure the dequeue is complete */
7199 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
7200 	mutex_lock(&adev->srbm_mutex);
7201 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
7202 	for (i = 0; i < adev->usec_timeout; i++) {
7203 		if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
7204 			break;
7205 		udelay(1);
7206 	}
7207 	if (i >= adev->usec_timeout)
7208 		r = -ETIMEDOUT;
7209 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7210 	mutex_unlock(&adev->srbm_mutex);
7211 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
7212 	if (r) {
7213 		dev_err(adev->dev, "failed to wait for hqd to deactivate\n");
7214 		return r;
7215 	}
7216 
7217 	r = gfx_v9_0_kcq_init_queue(ring, true);
7218 	if (r) {
7219 		dev_err(adev->dev, "failed to init kcq\n");
7220 		return r;
7221 	}
7222 	spin_lock_irqsave(&kiq->ring_lock, flags);
7223 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
7224 	if (r) {
7225 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7226 		return -ENOMEM;
7227 	}
7228 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
7229 	amdgpu_ring_commit(kiq_ring);
7230 	r = amdgpu_ring_test_ring(kiq_ring);
7231 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7232 	if (r) {
7233 		DRM_ERROR("failed to remap queue\n");
7234 		return r;
7235 	}
7236 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
7237 }
7238 
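/**
 * gfx_v9_ip_print - print the GFX IP register dump
 * @ip_block: the GFX IP block
 * @p: the drm printer to emit the dump through
 *
 * Prints the core registers captured by gfx_v9_ip_dump() and, when
 * available, the per-queue compute registers of every MEC instance.
 */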
7239 static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
7240 {
7241 	struct amdgpu_device *adev = ip_block->adev;
7242 	uint32_t i, j, k, reg, index = 0;
7243 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7244 
7245 	if (!adev->gfx.ip_dump_core)
7246 		return;
7247 
7248 	for (i = 0; i < reg_count; i++)
7249 		drm_printf(p, "%-50s \t 0x%08x\n",
7250 			   gc_reg_list_9[i].reg_name,
7251 			   adev->gfx.ip_dump_core[i]);
7252 
7253 	/* print compute queue registers for all instances */
7254 	if (!adev->gfx.ip_dump_compute_queues)
7255 		return;
7256 
7257 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7258 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
7259 		   adev->gfx.mec.num_mec,
7260 		   adev->gfx.mec.num_pipe_per_mec,
7261 		   adev->gfx.mec.num_queue_per_pipe);
7262 
7263 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7264 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7265 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7266 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
7267 				for (reg = 0; reg < reg_count; reg++) {
7268 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7269 						drm_printf(p, "%-50s \t 0x%08x\n",
7270 							   "mmCP_MEC_ME2_HEADER_DUMP",
7271 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7272 					else
7273 						drm_printf(p, "%-50s \t 0x%08x\n",
7274 							   gc_cp_reg_list_9[reg].reg_name,
7275 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7276 				}
7277 				index += reg_count;
7278 			}
7279 		}
7280 	}
7282 }
7283 
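/**
 * gfx_v9_ip_dump - capture GFX IP registers for later inspection
 * @ip_block: the GFX IP block
 *
 * Reads the core GC registers and the per-queue compute registers into
 * the ip_dump buffers while GFXOFF is temporarily disabled so that
 * gfx_v9_ip_print() can report a consistent snapshot.
 */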
7284 static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
7285 {
7286 	struct amdgpu_device *adev = ip_block->adev;
7287 	uint32_t i, j, k, reg, index = 0;
7288 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7289 
7290 	if (!adev->gfx.ip_dump_core || !adev->gfx.num_gfx_rings)
7291 		return;
7292 
7293 	amdgpu_gfx_off_ctrl(adev, false);
7294 	for (i = 0; i < reg_count; i++)
7295 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_9[i]));
7296 	amdgpu_gfx_off_ctrl(adev, true);
7297 
7298 	/* dump compute queue registers for all instances */
7299 	if (!adev->gfx.ip_dump_compute_queues)
7300 		return;
7301 
7302 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7303 	amdgpu_gfx_off_ctrl(adev, false);
7304 	mutex_lock(&adev->srbm_mutex);
7305 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7306 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7307 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7308 				/* ME0 is for GFX so start from 1 for CP */
7309 				soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
7310 
7311 				for (reg = 0; reg < reg_count; reg++) {
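					/* MEC1 and MEC2 share one register list;
					 * for MEC2 (i != 0) read the ME2 header
					 * dump register instead of the ME1 one.
					 */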
7312 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7313 						adev->gfx.ip_dump_compute_queues[index + reg] =
7314 							RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
7315 					else
7316 						adev->gfx.ip_dump_compute_queues[index + reg] =
7317 							RREG32(SOC15_REG_ENTRY_OFFSET(
7318 								       gc_cp_reg_list_9[reg]));
7319 				}
7320 				index += reg_count;
7321 			}
7322 		}
7323 	}
7324 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7325 	mutex_unlock(&adev->srbm_mutex);
7326 	amdgpu_gfx_off_ctrl(adev, true);
7328 }
7329 
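/**
 * gfx_v9_0_ring_emit_cleaner_shader - emit the cleaner shader packet
 * @ring: the ring to emit on
 *
 * Runs the cleaner shader to scrub stale shader state between jobs;
 * GC 9.4.2 uses a different packet opcode than the other gfx9 parts.
 */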
7330 static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
7331 {
7332 	struct amdgpu_device *adev = ring->adev;
7333 
7334 	/* Emit the cleaner shader */
7335 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
7336 		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
7337 	else
7338 		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
7339 
7340 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
7341 }
7342 
7343 static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
7344 {
7345 	struct amdgpu_device *adev = ring->adev;
7346 	struct amdgpu_ip_block *gfx_block =
7347 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7348 
7349 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
7350 
7351 	/* Raven and Picasso (PCO) APUs seem to have stability issues
7352 	 * with compute together with GFXOFF and gfx powergating. Disable
7353 	 * gfx powergating during submission and re-enable it afterwards.
7354 	 */
7355 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7356 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
7357 }
7358 
7359 static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
7360 {
7361 	struct amdgpu_device *adev = ring->adev;
7362 	struct amdgpu_ip_block *gfx_block =
7363 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7364 
7365 	/* Raven and Picasso (PCO) APUs seem to have stability issues
7366 	 * with compute together with GFXOFF and gfx powergating. Disable
7367 	 * gfx powergating during submission and re-enable it afterwards.
7368 	 */
7369 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7370 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
7371 
7372 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
7373 }
7374 
7375 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
7376 	.name = "gfx_v9_0",
7377 	.early_init = gfx_v9_0_early_init,
7378 	.late_init = gfx_v9_0_late_init,
7379 	.sw_init = gfx_v9_0_sw_init,
7380 	.sw_fini = gfx_v9_0_sw_fini,
7381 	.hw_init = gfx_v9_0_hw_init,
7382 	.hw_fini = gfx_v9_0_hw_fini,
7383 	.suspend = gfx_v9_0_suspend,
7384 	.resume = gfx_v9_0_resume,
7385 	.is_idle = gfx_v9_0_is_idle,
7386 	.wait_for_idle = gfx_v9_0_wait_for_idle,
7387 	.soft_reset = gfx_v9_0_soft_reset,
7388 	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
7389 	.set_powergating_state = gfx_v9_0_set_powergating_state,
7390 	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
7391 	.dump_ip_state = gfx_v9_ip_dump,
7392 	.print_ip_state = gfx_v9_ip_print,
7393 };
7394 
7395 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
7396 	.type = AMDGPU_RING_TYPE_GFX,
7397 	.align_mask = 0xff,
7398 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7399 	.support_64bit_ptrs = true,
7400 	.secure_submission_supported = true,
7401 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
7402 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
7403 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
7404 	.emit_frame_size = /* 242 dwords maximum in total with 16 IBs */
7405 		5 +  /* COND_EXEC */
7406 		7 +  /* PIPELINE_SYNC */
7407 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7408 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7409 		2 + /* VM_FLUSH */
7410 		8 +  /* FENCE for VM_FLUSH */
7411 		20 + /* GDS switch */
7412 		4 + /* double SWITCH_BUFFER,
7413 		     * the first COND_EXEC jumps to the place just
7414 		     * prior to this double SWITCH_BUFFER */
7415 		5 + /* COND_EXEC */
7416 		7 + /* HDP_flush */
7417 		4 + /* VGT_flush */
7418 		14 + /* CE_META */
7419 		31 + /* DE_META */
7420 		3 + /* CNTX_CTRL */
7421 		5 + /* HDP_INVL */
7422 		8 + 8 + /* FENCE x2 */
7423 		2 + /* SWITCH_BUFFER */
7424 		7 + /* gfx_v9_0_emit_mem_sync */
7425 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7426 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7427 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7428 	.emit_fence = gfx_v9_0_ring_emit_fence,
7429 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7430 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7431 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7432 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7433 	.test_ring = gfx_v9_0_ring_test_ring,
7434 	.insert_nop = gfx_v9_ring_insert_nop,
7435 	.pad_ib = amdgpu_ring_generic_pad_ib,
7436 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7437 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7438 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7439 	.preempt_ib = gfx_v9_0_ring_preempt_ib,
7440 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7441 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7442 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7443 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7444 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7445 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7446 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7447 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7448 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7449 };
7450 
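/* ring functions for the software gfx rings that are multiplexed onto
 * the hardware gfx ring when mid command buffer preemption is enabled
 */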
7451 static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
7452 	.type = AMDGPU_RING_TYPE_GFX,
7453 	.align_mask = 0xff,
7454 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7455 	.support_64bit_ptrs = true,
7456 	.secure_submission_supported = true,
7457 	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
7458 	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
7459 	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
7460 	.emit_frame_size = /* 242 dwords maximum in total with 16 IBs */
7461 		5 +  /* COND_EXEC */
7462 		7 +  /* PIPELINE_SYNC */
7463 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7464 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7465 		2 + /* VM_FLUSH */
7466 		8 +  /* FENCE for VM_FLUSH */
7467 		20 + /* GDS switch */
7468 		4 + /* double SWITCH_BUFFER,
7469 		     * the first COND_EXEC jumps to the place just
7470 		     * prior to this double SWITCH_BUFFER
7471 		     */
7472 		5 + /* COND_EXEC */
7473 		7 + /* HDP_flush */
7474 		4 + /* VGT_flush */
7475 		14 + /* CE_META */
7476 		31 + /* DE_META */
7477 		3 + /* CNTX_CTRL */
7478 		5 + /* HDP_INVL */
7479 		8 + 8 + /* FENCE x2 */
7480 		2 + /* SWITCH_BUFFER */
7481 		7 + /* gfx_v9_0_emit_mem_sync */
7482 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7483 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7484 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7485 	.emit_fence = gfx_v9_0_ring_emit_fence,
7486 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7487 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7488 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7489 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7490 	.test_ring = gfx_v9_0_ring_test_ring,
7491 	.test_ib = gfx_v9_0_ring_test_ib,
7492 	.insert_nop = gfx_v9_ring_insert_nop,
7493 	.pad_ib = amdgpu_ring_generic_pad_ib,
7494 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7495 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7496 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7497 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7498 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7499 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7500 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7501 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7502 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7503 	.patch_cntl = gfx_v9_0_ring_patch_cntl,
7504 	.patch_de = gfx_v9_0_ring_patch_de_meta,
7505 	.patch_ce = gfx_v9_0_ring_patch_ce_meta,
7506 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7507 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7508 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7509 };
7510 
7511 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7512 	.type = AMDGPU_RING_TYPE_COMPUTE,
7513 	.align_mask = 0xff,
7514 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7515 	.support_64bit_ptrs = true,
7516 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7517 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7518 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7519 	.emit_frame_size =
7520 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7521 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7522 		5 + /* hdp invalidate */
7523 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7524 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7525 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7526 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7527 		7 + /* gfx_v9_0_emit_mem_sync */
7528 		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
7529 		15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
7530 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7531 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7532 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
7533 	.emit_fence = gfx_v9_0_ring_emit_fence,
7534 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7535 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7536 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7537 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7538 	.test_ring = gfx_v9_0_ring_test_ring,
7539 	.test_ib = gfx_v9_0_ring_test_ib,
7540 	.insert_nop = gfx_v9_ring_insert_nop,
7541 	.pad_ib = amdgpu_ring_generic_pad_ib,
7542 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7543 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7544 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7545 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7546 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7547 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
7548 	.reset = gfx_v9_0_reset_kcq,
7549 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7550 	.begin_use = gfx_v9_0_ring_begin_use_compute,
7551 	.end_use = gfx_v9_0_ring_end_use_compute,
7552 };
7553 
7554 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7555 	.type = AMDGPU_RING_TYPE_KIQ,
7556 	.align_mask = 0xff,
7557 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7558 	.support_64bit_ptrs = true,
7559 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7560 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7561 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7562 	.emit_frame_size =
7563 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7564 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7565 		5 + /* hdp invalidate */
7566 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7567 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7568 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7569 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7570 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7571 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
7572 	.test_ring = gfx_v9_0_ring_test_ring,
7573 	.insert_nop = amdgpu_ring_insert_nop,
7574 	.pad_ib = amdgpu_ring_generic_pad_ib,
7575 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
7576 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7577 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7578 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7579 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7580 };
7581 
7582 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
7583 {
7584 	int i;
7585 
7586 	adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
7587 
7588 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7589 		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
7590 
7591 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
7592 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
7593 			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
7594 	}
7595 
7596 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7597 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
7598 }
7599 
7600 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
7601 	.set = gfx_v9_0_set_eop_interrupt_state,
7602 	.process = gfx_v9_0_eop_irq,
7603 };
7604 
7605 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
7606 	.set = gfx_v9_0_set_priv_reg_fault_state,
7607 	.process = gfx_v9_0_priv_reg_irq,
7608 };
7609 
7610 static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
7611 	.set = gfx_v9_0_set_bad_op_fault_state,
7612 	.process = gfx_v9_0_bad_op_irq,
7613 };
7614 
7615 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
7616 	.set = gfx_v9_0_set_priv_inst_fault_state,
7617 	.process = gfx_v9_0_priv_inst_irq,
7618 };
7619 
7620 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
7621 	.set = gfx_v9_0_set_cp_ecc_error_state,
7622 	.process = amdgpu_gfx_cp_ecc_error_irq,
7623 };
7624 
7626 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
7627 {
7628 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7629 	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
7630 
7631 	adev->gfx.priv_reg_irq.num_types = 1;
7632 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
7633 
7634 	adev->gfx.bad_op_irq.num_types = 1;
7635 	adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
7636 
7637 	adev->gfx.priv_inst_irq.num_types = 1;
7638 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
7639 
7640 	adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
7641 	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
7642 }
7643 
7644 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
7645 {
7646 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7647 	case IP_VERSION(9, 0, 1):
7648 	case IP_VERSION(9, 2, 1):
7649 	case IP_VERSION(9, 4, 0):
7650 	case IP_VERSION(9, 2, 2):
7651 	case IP_VERSION(9, 1, 0):
7652 	case IP_VERSION(9, 4, 1):
7653 	case IP_VERSION(9, 3, 0):
7654 	case IP_VERSION(9, 4, 2):
7655 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7656 		break;
7657 	default:
7658 		break;
7659 	}
7660 }
7661 
7662 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7663 {
7664 	/* init asic gds info */
7665 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7666 	case IP_VERSION(9, 0, 1):
7667 	case IP_VERSION(9, 2, 1):
7668 	case IP_VERSION(9, 4, 0):
7669 		adev->gds.gds_size = 0x10000;
7670 		break;
7671 	case IP_VERSION(9, 2, 2):
7672 	case IP_VERSION(9, 1, 0):
7673 	case IP_VERSION(9, 4, 1):
7674 		adev->gds.gds_size = 0x1000;
7675 		break;
7676 	case IP_VERSION(9, 4, 2):
7677 		/* Aldebaran removed all the GDS internal memory; only GWS
7678 		 * opcodes such as barrier and semaphore are supported in
7679 		 * the kernel */
7680 		adev->gds.gds_size = 0;
7681 		break;
7682 	default:
7683 		adev->gds.gds_size = 0x10000;
7684 		break;
7685 	}
7686 
7687 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7688 	case IP_VERSION(9, 0, 1):
7689 	case IP_VERSION(9, 4, 0):
7690 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7691 		break;
7692 	case IP_VERSION(9, 2, 1):
7693 		adev->gds.gds_compute_max_wave_id = 0x27f;
7694 		break;
7695 	case IP_VERSION(9, 2, 2):
7696 	case IP_VERSION(9, 1, 0):
7697 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7698 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7699 		else
7700 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7701 		break;
7702 	case IP_VERSION(9, 4, 1):
7703 		adev->gds.gds_compute_max_wave_id = 0xfff;
7704 		break;
7705 	case IP_VERSION(9, 4, 2):
7706 		/* deprecated on Aldebaran, not used at all */
7707 		adev->gds.gds_compute_max_wave_id = 0;
7708 		break;
7709 	default:
7710 		/* this really depends on the chip */
7711 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7712 		break;
7713 	}
7714 
7715 	adev->gds.gws_size = 64;
7716 	adev->gds.oa_size = 16;
7717 }
7718 
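/**
 * gfx_v9_0_set_user_cu_inactive_bitmap - disable CUs on the selected SH
 * @adev: amdgpu device pointer
 * @bitmap: mask of CUs to mark inactive
 *
 * Writes the user CU disable mask for the shader array currently
 * selected via GRBM; a zero bitmap leaves the register untouched.
 */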
7719 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7720 						 u32 bitmap)
7721 {
7722 	u32 data;
7723 
7724 	if (!bitmap)
7725 		return;
7726 
7727 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7728 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7729 
7730 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7731 }
7732 
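/**
 * gfx_v9_0_get_cu_active_bitmap - query the active CUs of the selected SH
 * @adev: amdgpu device pointer
 *
 * Combines the fused-off and user-disabled CU masks of the shader array
 * currently selected via GRBM and returns the bitmap of active CUs.
 */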
7733 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7734 {
7735 	u32 data, mask;
7736 
7737 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7738 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7739 
7740 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7741 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7742 
7743 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7744 
7745 	return (~data) & mask;
7746 }
7747 
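/**
 * gfx_v9_0_get_cu_info - gather CU information for the GPU
 * @adev: amdgpu device pointer
 * @cu_info: structure to fill with CU counts and bitmaps
 *
 * Walks every shader engine and shader array, applies the user CU
 * disable masks and records the active and always-on CU bitmaps.
 *
 * Returns 0 on success or a negative error code on failure.
 */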
7748 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7749 				 struct amdgpu_cu_info *cu_info)
7750 {
7751 	int i, j, k, counter, active_cu_number = 0;
7752 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7753 	unsigned int disable_masks[4 * 4];
7754 
7755 	if (!adev || !cu_info)
7756 		return -EINVAL;
7757 
7758 	/*
7759 	 * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
7760 	 */
7761 	if (adev->gfx.config.max_shader_engines *
7762 		adev->gfx.config.max_sh_per_se > 16)
7763 		return -EINVAL;
7764 
7765 	amdgpu_gfx_parse_disable_cu(disable_masks,
7766 				    adev->gfx.config.max_shader_engines,
7767 				    adev->gfx.config.max_sh_per_se);
7768 
7769 	mutex_lock(&adev->grbm_idx_mutex);
7770 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7771 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7772 			mask = 1;
7773 			ao_bitmap = 0;
7774 			counter = 0;
7775 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
7776 			gfx_v9_0_set_user_cu_inactive_bitmap(
7777 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7778 			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7779 
7780 			/*
7781 			 * The bitmap (and ao_cu_bitmap) in the cu_info
7782 			 * structure is a 4x4 array, which suits Vega ASICs
7783 			 * with their 4*2 SE/SH layout.
7784 			 * Arcturus, however, changed the SE/SH layout to 8*1.
7785 			 * To minimize the impact, we keep it compatible with
7786 			 * the current bitmap array as below:
7787 			 *    SE4,SH0 --> bitmap[0][1]
7788 			 *    SE5,SH0 --> bitmap[1][1]
7789 			 *    SE6,SH0 --> bitmap[2][1]
7790 			 *    SE7,SH0 --> bitmap[3][1]
7791 			 */
7792 			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
7793 
7794 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7795 				if (bitmap & mask) {
7796 					if (counter < adev->gfx.config.max_cu_per_sh)
7797 						ao_bitmap |= mask;
7798 					counter++;
7799 				}
7800 				mask <<= 1;
7801 			}
7802 			active_cu_number += counter;
7803 			if (i < 2 && j < 2)
7804 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7805 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7806 		}
7807 	}
7808 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7809 	mutex_unlock(&adev->grbm_idx_mutex);
7810 
7811 	cu_info->number = active_cu_number;
7812 	cu_info->ao_cu_mask = ao_cu_mask;
7813 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7814 
7815 	return 0;
7816 }
7817 
7818 const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
7820 	.type = AMD_IP_BLOCK_TYPE_GFX,
7821 	.major = 9,
7822 	.minor = 0,
7823 	.rev = 0,
7824 	.funcs = &gfx_v9_0_ip_funcs,
7825 };
7826