xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (revision d526b4efb748d439af68be7d1a8922716a0eb52c)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/firmware.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_gfx.h"
32 #include "soc15.h"
33 #include "soc15d.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_pm.h"
36 
37 #include "gc/gc_9_0_offset.h"
38 #include "gc/gc_9_0_sh_mask.h"
39 
40 #include "vega10_enum.h"
41 
42 #include "soc15_common.h"
43 #include "clearstate_gfx9.h"
44 #include "v9_structs.h"
45 
46 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
47 
48 #include "amdgpu_ras.h"
49 
50 #include "amdgpu_ring_mux.h"
51 #include "gfx_v9_4.h"
52 #include "gfx_v9_0.h"
53 #include "gfx_v9_0_cleaner_shader.h"
54 #include "gfx_v9_4_2.h"
55 
56 #include "asic_reg/pwr/pwr_10_0_offset.h"
57 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
58 #include "asic_reg/gc/gc_9_0_default.h"
59 
60 #define GFX9_NUM_GFX_RINGS     1
61 #define GFX9_NUM_SW_GFX_RINGS  2
62 #define GFX9_MEC_HPD_SIZE 4096
63 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
64 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
65 
66 #define mmGCEA_PROBE_MAP                        0x070c
67 #define mmGCEA_PROBE_MAP_BASE_IDX               0
68 
69 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
70 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
71 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
72 MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
73 MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
74 MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
75 
76 MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_me.bin");
79 MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
80 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
81 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
82 
83 MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
84 MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
85 MODULE_FIRMWARE("amdgpu/vega20_me.bin");
86 MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
87 MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
88 MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
89 
90 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
91 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
92 MODULE_FIRMWARE("amdgpu/raven_me.bin");
93 MODULE_FIRMWARE("amdgpu/raven_mec.bin");
94 MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
95 MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
96 
97 MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
98 MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
99 MODULE_FIRMWARE("amdgpu/picasso_me.bin");
100 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
101 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
102 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
103 MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
104 
105 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
106 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
107 MODULE_FIRMWARE("amdgpu/raven2_me.bin");
108 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
109 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
110 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
111 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
112 
113 MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
114 MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
115 
116 MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
117 MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
118 MODULE_FIRMWARE("amdgpu/renoir_me.bin");
119 MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
120 MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
121 
122 MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
123 MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
124 MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
125 MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
126 MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
127 MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
128 
129 MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
130 MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
131 MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
132 MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
133 MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
134 
135 #define mmTCP_CHAN_STEER_0_ARCT								0x0b03
136 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX							0
137 #define mmTCP_CHAN_STEER_1_ARCT								0x0b04
138 #define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX							0
139 #define mmTCP_CHAN_STEER_2_ARCT								0x0b09
140 #define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX							0
141 #define mmTCP_CHAN_STEER_3_ARCT								0x0b0a
142 #define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX							0
143 #define mmTCP_CHAN_STEER_4_ARCT								0x0b0b
144 #define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX							0
145 #define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
146 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX							0
147 
148 #define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
149 #define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
150 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
151 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1
152 
153 static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
154 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
155 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
156 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
157 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
158 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
159 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
160 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
161 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
162 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
163 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
164 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
165 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
166 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
167 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
168 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
169 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
170 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
171 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
172 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
173 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
174 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
175 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
176 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
177 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
178 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
179 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
180 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
181 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
182 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
183 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
184 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
185 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
186 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
187 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
188 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
189 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
190 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
191 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
192 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
193 	SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
194 	SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
195 	SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
196 	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
197 	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
198 	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
199 	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_CNTL),
200 	SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
201 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
202 	SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
203 	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL1_STATUS),
204 	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL1_STATUS),
205 	SOC15_REG_ENTRY_STR(GC, 0, mmSQ_UTCL1_STATUS),
206 	SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL1_STATUS),
207 	SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
208 	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL),
209 	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS),
210 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
211 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
212 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
213 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
214 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
215 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
216 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
217 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
218 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
219 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
220 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
221 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
222 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
223 	SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
224 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
225 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
226 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
227 	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
228 	/* SE status registers */
229 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
230 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
231 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
232 	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
233 	/* packet headers */
234 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
235 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
236 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
237 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
238 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
239 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
240 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
241 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
242 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
243 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
244 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
245 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
246 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
247 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
248 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
249 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
250 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
251 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
252 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
253 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
254 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
255 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
256 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
257 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
258 };
259 
260 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
261 	/* compute queue registers */
262 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
263 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ACTIVE),
264 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
265 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
266 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
267 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
268 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
269 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
270 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
271 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
272 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
273 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
274 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
275 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
276 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
277 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
278 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
279 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
280 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
281 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
282 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
283 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
284 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
285 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
286 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
287 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
288 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
289 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
290 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
291 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
292 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
293 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
294 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
295 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
296 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
297 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
298 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
299 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
300 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
301 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
302 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
303 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
304 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
305 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
306 	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
307 };
308 
309 enum ta_ras_gfx_subblock {
310 	/*CPC*/
311 	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
312 	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
313 	TA_RAS_BLOCK__GFX_CPC_UCODE,
314 	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
315 	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
316 	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
317 	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
318 	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
319 	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
320 	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
321 	/* CPF*/
322 	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
323 	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
324 	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
325 	TA_RAS_BLOCK__GFX_CPF_TAG,
326 	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
327 	/* CPG*/
328 	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
329 	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
330 	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
331 	TA_RAS_BLOCK__GFX_CPG_TAG,
332 	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
333 	/* GDS*/
334 	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
335 	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
336 	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
337 	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
338 	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
339 	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
340 	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
341 	/* SPI*/
342 	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
343 	/* SQ*/
344 	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
345 	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
346 	TA_RAS_BLOCK__GFX_SQ_LDS_D,
347 	TA_RAS_BLOCK__GFX_SQ_LDS_I,
348 	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
349 	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
350 	/* SQC (3 ranges)*/
351 	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
352 	/* SQC range 0*/
353 	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
354 	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
355 		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
356 	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
357 	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
358 	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
359 	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
360 	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
361 	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
362 	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
363 		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
364 	/* SQC range 1*/
365 	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
366 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
367 		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
368 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
369 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
370 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
371 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
372 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
373 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
374 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
375 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
376 	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
377 		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
378 	/* SQC range 2*/
379 	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
380 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
381 		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
382 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
383 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
384 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
385 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
386 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
387 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
388 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
389 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
390 	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
391 		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
392 	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
393 	/* TA*/
394 	TA_RAS_BLOCK__GFX_TA_INDEX_START,
395 	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
396 	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
397 	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
398 	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
399 	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
400 	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
401 	/* TCA*/
402 	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
403 	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
404 	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
405 	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
406 	/* TCC (5 sub-ranges)*/
407 	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
408 	/* TCC range 0*/
409 	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
410 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
411 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
412 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
413 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
414 	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
415 	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
416 	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
417 	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
418 	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
419 	/* TCC range 1*/
420 	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
421 	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
422 	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
423 	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
424 		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
425 	/* TCC range 2*/
426 	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
427 	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
428 	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
429 	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
430 	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
431 	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
432 	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
433 	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
434 	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
435 	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
436 		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
437 	/* TCC range 3*/
438 	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
439 	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
440 	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
441 	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
442 		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
443 	/* TCC range 4*/
444 	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
445 	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
446 		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
447 	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
448 	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
449 		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
450 	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
451 	/* TCI*/
452 	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
453 	/* TCP*/
454 	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
455 	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
456 	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
457 	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
458 	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
459 	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
460 	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
461 	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
462 	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
463 	/* TD*/
464 	TA_RAS_BLOCK__GFX_TD_INDEX_START,
465 	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
466 	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
467 	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
468 	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
469 	/* EA (3 sub-ranges)*/
470 	TA_RAS_BLOCK__GFX_EA_INDEX_START,
471 	/* EA range 0*/
472 	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
473 	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
474 	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
475 	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
476 	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
477 	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
478 	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
479 	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
480 	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
481 	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
482 	/* EA range 1*/
483 	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
484 	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
485 	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
486 	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
487 	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
488 	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
489 	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
490 	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
491 	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
492 	/* EA range 2*/
493 	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
494 	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
495 	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
496 	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
497 	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
498 	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
499 	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
500 	/* UTC VM L2 bank*/
501 	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
502 	/* UTC VM walker*/
503 	TA_RAS_BLOCK__UTC_VML2_WALKER,
504 	/* UTC ATC L2 2MB cache*/
505 	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
506 	/* UTC ATC L2 4KB cache*/
507 	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
508 	TA_RAS_BLOCK__GFX_MAX
509 };
510 
511 struct ras_gfx_subblock {
512 	unsigned char *name;
513 	int ta_subblock;
514 	int hw_supported_error_type;
515 	int sw_supported_error_type;
516 };
517 
518 #define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
519 	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
520 		#subblock,                                                     \
521 		TA_RAS_BLOCK__##subblock,                                      \
522 		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
523 		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
524 	}
525 
526 static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
527 	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
528 	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
529 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
530 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
531 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
532 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
533 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
534 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
535 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
536 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
537 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
538 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
539 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
540 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
541 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
542 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
543 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
544 			     0),
545 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
546 			     0),
547 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
548 	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
549 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
550 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
551 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
552 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
553 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
554 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
555 			     0, 0),
556 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
557 			     0),
558 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
559 			     0, 0),
560 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
561 			     0),
562 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
563 			     0, 0),
564 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
565 			     0),
566 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
567 			     1),
568 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
569 			     0, 0, 0),
570 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
571 			     0),
572 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
573 			     0),
574 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
575 			     0),
576 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
577 			     0),
578 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
579 			     0),
580 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
581 			     0, 0),
582 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
583 			     0),
584 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
585 			     0),
586 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
587 			     0, 0, 0),
588 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
589 			     0),
590 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
591 			     0),
592 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
593 			     0),
594 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
595 			     0),
596 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
597 			     0),
598 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
599 			     0, 0),
600 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
601 			     0),
602 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
603 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
604 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
605 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
606 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
607 	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
608 	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
609 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
610 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
611 			     1),
612 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
613 			     1),
614 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
615 			     1),
616 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
617 			     0),
618 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
619 			     0),
620 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
621 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
622 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
623 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
624 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
625 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
626 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
627 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
628 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
629 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
630 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
631 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
632 			     0),
633 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
634 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
635 			     0),
636 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
637 			     0, 0),
638 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
639 			     0),
640 	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
641 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
642 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
643 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
644 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
645 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
646 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
647 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
648 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
649 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
650 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
651 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
652 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
653 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
654 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
655 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
656 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
657 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
658 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
659 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
660 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
661 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
662 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
663 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
664 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
665 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
666 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
667 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
668 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
669 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
670 	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
671 	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
672 	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
673 	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
674 };
675 
676 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
677 {
678 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
679 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
680 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
681 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
682 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
683 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
684 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
685 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
686 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
687 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
688 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
689 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
690 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
691 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
692 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
693 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
694 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
695 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
696 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
697 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
698 };
699 
700 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
701 {
702 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
703 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
704 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
705 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
706 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
707 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
708 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
709 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
710 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
711 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
712 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
713 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
714 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
715 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
716 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
717 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
718 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
719 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
720 };
721 
722 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
723 {
724 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
725 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
726 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
727 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
728 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
729 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
730 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
731 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
732 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
733 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
734 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
735 };
736 
737 static const struct soc15_reg_golden golden_settings_gc_9_1[] =
738 {
739 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
740 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
741 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
742 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
743 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
744 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
745 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
746 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
747 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
748 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
749 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
750 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
751 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
752 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
753 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
754 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
755 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
756 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
757 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
758 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
759 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
760 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
761 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
762 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
763 };
764 
765 static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
766 {
767 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
768 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
769 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
770 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
771 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
772 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
773 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
774 };
775 
776 static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
777 {
778 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
779 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
780 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
781 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
782 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
783 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
784 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
785 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
786 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
787 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
788 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
789 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
790 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
791 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
792 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
793 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
794 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
795 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
796 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
797 };
798 
799 static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
800 {
801 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
802 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
803 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
804 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
805 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
806 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
807 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
808 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
809 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
810 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
811 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
812 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
813 };
814 
815 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
816 {
817 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
818 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
819 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
820 };
821 
822 static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
823 {
824 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
825 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
826 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
827 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
828 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
829 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
830 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
831 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
832 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
833 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
834 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
835 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
836 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
837 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
838 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
839 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
840 };
841 
842 static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
843 {
844 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
845 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
846 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
847 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
848 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
849 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
850 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
851 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
852 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
853 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
854 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
855 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
856 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
857 };
858 
859 static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
860 {
861 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
862 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
863 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
864 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
865 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
866 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
867 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
868 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
869 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
870 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
871 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
872 };
873 
874 static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
875 	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
876 	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
877 };
878 
879 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
880 {
881 	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
882 	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
883 	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
884 	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
885 	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
886 	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
887 	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
888 	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
889 };
890 
891 static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
892 {
893 	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
894 	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
895 	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
896 	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
897 	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
898 	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
899 	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
900 	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
901 };
902 
903 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
904 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
905 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
906 #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
907 
908 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
909 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
910 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
911 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
912 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
913 				struct amdgpu_cu_info *cu_info);
914 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
915 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
916 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
917 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
918 					  void *ras_error_status);
919 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
920 				     void *inject_if, uint32_t instance_mask);
921 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
922 static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
923 					      unsigned int vmid);
924 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
925 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
926 
927 static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
928 				uint64_t queue_mask)
929 {
930 	struct amdgpu_device *adev = kiq_ring->adev;
931 	u64 shader_mc_addr;
932 
933 	/* Cleaner shader MC address */
934 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
935 
936 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
937 	amdgpu_ring_write(kiq_ring,
938 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
939 		/* vmid_mask:0* queue_type:0 (KIQ) */
940 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
941 	amdgpu_ring_write(kiq_ring,
942 			lower_32_bits(queue_mask));	/* queue mask lo */
943 	amdgpu_ring_write(kiq_ring,
944 			upper_32_bits(queue_mask));	/* queue mask hi */
945 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
946 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
947 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
948 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
949 }
950 
951 static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
952 				 struct amdgpu_ring *ring)
953 {
954 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
955 	uint64_t wptr_addr = ring->wptr_gpu_addr;
956 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
957 
958 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
959 	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
960 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
961 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
962 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
963 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
964 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
965 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
966 			 /*queue_type: normal compute queue */
967 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
968 			 /* alloc format: all_on_one_pipe */
969 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
970 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
971 			 /* num_queues: must be 1 */
972 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
973 	amdgpu_ring_write(kiq_ring,
974 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
975 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
976 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
977 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
978 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
979 }
980 
981 static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
982 				   struct amdgpu_ring *ring,
983 				   enum amdgpu_unmap_queues_action action,
984 				   u64 gpu_addr, u64 seq)
985 {
986 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
987 
988 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
989 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
990 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
991 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
992 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
993 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
994 	amdgpu_ring_write(kiq_ring,
995 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
996 
997 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
998 		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
999 		amdgpu_ring_write(kiq_ring, 0);
1000 		amdgpu_ring_write(kiq_ring, 0);
1001 
1002 	} else {
1003 		amdgpu_ring_write(kiq_ring, 0);
1004 		amdgpu_ring_write(kiq_ring, 0);
1005 		amdgpu_ring_write(kiq_ring, 0);
1006 	}
1007 }
1008 
1009 static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
1010 				   struct amdgpu_ring *ring,
1011 				   u64 addr,
1012 				   u64 seq)
1013 {
1014 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
1015 
1016 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
1017 	amdgpu_ring_write(kiq_ring,
1018 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
1019 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
1020 			  PACKET3_QUERY_STATUS_COMMAND(2));
1021 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
1022 	amdgpu_ring_write(kiq_ring,
1023 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
1024 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
1025 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
1026 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
1027 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
1028 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
1029 }
1030 
1031 static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
1032 				uint16_t pasid, uint32_t flush_type,
1033 				bool all_hub)
1034 {
1035 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
1036 	amdgpu_ring_write(kiq_ring,
1037 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
1038 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
1039 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
1040 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
1041 }
1042 
1043 
1044 static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
1045 					uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
1046 					uint32_t xcc_id, uint32_t vmid)
1047 {
1048 	struct amdgpu_device *adev = kiq_ring->adev;
1049 	unsigned i;
1050 
1051 	/* enter save mode */
1052 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1053 	mutex_lock(&adev->srbm_mutex);
1054 	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);
1055 
1056 	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
1057 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
1058 		WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
1059 		/* wait till dequeue take effects */
1060 		for (i = 0; i < adev->usec_timeout; i++) {
1061 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
1062 				break;
1063 			udelay(1);
1064 		}
1065 		if (i >= adev->usec_timeout)
1066 			dev_err(adev->dev, "fail to wait on hqd deactive\n");
1067 	} else {
1068 		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
1069 	}
1070 
1071 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
1072 	mutex_unlock(&adev->srbm_mutex);
1073 	/* exit safe mode */
1074 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1075 }
1076 
1077 static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
1078 	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
1079 	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
1080 	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
1081 	.kiq_query_status = gfx_v9_0_kiq_query_status,
1082 	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
1083 	.kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
1084 	.set_resources_size = 8,
1085 	.map_queues_size = 7,
1086 	.unmap_queues_size = 6,
1087 	.query_status_size = 7,
1088 	.invalidate_tlbs_size = 2,
1089 };
1090 
1091 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
1092 {
1093 	adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
1094 }
1095 
1096 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
1097 {
1098 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1099 	case IP_VERSION(9, 0, 1):
1100 		soc15_program_register_sequence(adev,
1101 						golden_settings_gc_9_0,
1102 						ARRAY_SIZE(golden_settings_gc_9_0));
1103 		soc15_program_register_sequence(adev,
1104 						golden_settings_gc_9_0_vg10,
1105 						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
1106 		break;
1107 	case IP_VERSION(9, 2, 1):
1108 		soc15_program_register_sequence(adev,
1109 						golden_settings_gc_9_2_1,
1110 						ARRAY_SIZE(golden_settings_gc_9_2_1));
1111 		soc15_program_register_sequence(adev,
1112 						golden_settings_gc_9_2_1_vg12,
1113 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
1114 		break;
1115 	case IP_VERSION(9, 4, 0):
1116 		soc15_program_register_sequence(adev,
1117 						golden_settings_gc_9_0,
1118 						ARRAY_SIZE(golden_settings_gc_9_0));
1119 		soc15_program_register_sequence(adev,
1120 						golden_settings_gc_9_0_vg20,
1121 						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
1122 		break;
1123 	case IP_VERSION(9, 4, 1):
1124 		soc15_program_register_sequence(adev,
1125 						golden_settings_gc_9_4_1_arct,
1126 						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
1127 		break;
1128 	case IP_VERSION(9, 2, 2):
1129 	case IP_VERSION(9, 1, 0):
1130 		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
1131 						ARRAY_SIZE(golden_settings_gc_9_1));
1132 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1133 			soc15_program_register_sequence(adev,
1134 							golden_settings_gc_9_1_rv2,
1135 							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
1136 		else
1137 			soc15_program_register_sequence(adev,
1138 							golden_settings_gc_9_1_rv1,
1139 							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
1140 		break;
1141 	 case IP_VERSION(9, 3, 0):
1142 		soc15_program_register_sequence(adev,
1143 						golden_settings_gc_9_1_rn,
1144 						ARRAY_SIZE(golden_settings_gc_9_1_rn));
1145 		return; /* for renoir, don't need common goldensetting */
1146 	case IP_VERSION(9, 4, 2):
1147 		gfx_v9_4_2_init_golden_registers(adev,
1148 						 adev->smuio.funcs->get_die_id(adev));
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1155 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))
1156 		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
1157 						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
1158 }
1159 
1160 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1161 				       bool wc, uint32_t reg, uint32_t val)
1162 {
1163 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1164 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1165 				WRITE_DATA_DST_SEL(0) |
1166 				(wc ? WR_CONFIRM : 0));
1167 	amdgpu_ring_write(ring, reg);
1168 	amdgpu_ring_write(ring, 0);
1169 	amdgpu_ring_write(ring, val);
1170 }
1171 
1172 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1173 				  int mem_space, int opt, uint32_t addr0,
1174 				  uint32_t addr1, uint32_t ref, uint32_t mask,
1175 				  uint32_t inv)
1176 {
1177 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1178 	amdgpu_ring_write(ring,
1179 				 /* memory (1) or register (0) */
1180 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1181 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
1182 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1183 				 WAIT_REG_MEM_ENGINE(eng_sel)));
1184 
1185 	if (mem_space)
1186 		BUG_ON(addr0 & 0x3); /* Dword align */
1187 	amdgpu_ring_write(ring, addr0);
1188 	amdgpu_ring_write(ring, addr1);
1189 	amdgpu_ring_write(ring, ref);
1190 	amdgpu_ring_write(ring, mask);
1191 	amdgpu_ring_write(ring, inv); /* poll interval */
1192 }
1193 
1194 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1195 {
1196 	struct amdgpu_device *adev = ring->adev;
1197 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1198 	uint32_t tmp = 0;
1199 	unsigned i;
1200 	int r;
1201 
1202 	WREG32(scratch, 0xCAFEDEAD);
1203 	r = amdgpu_ring_alloc(ring, 3);
1204 	if (r)
1205 		return r;
1206 
1207 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1208 	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
1209 	amdgpu_ring_write(ring, 0xDEADBEEF);
1210 	amdgpu_ring_commit(ring);
1211 
1212 	for (i = 0; i < adev->usec_timeout; i++) {
1213 		tmp = RREG32(scratch);
1214 		if (tmp == 0xDEADBEEF)
1215 			break;
1216 		udelay(1);
1217 	}
1218 
1219 	if (i >= adev->usec_timeout)
1220 		r = -ETIMEDOUT;
1221 	return r;
1222 }
1223 
1224 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1225 {
1226 	struct amdgpu_device *adev = ring->adev;
1227 	struct amdgpu_ib ib;
1228 	struct dma_fence *f = NULL;
1229 
1230 	unsigned index;
1231 	uint64_t gpu_addr;
1232 	uint32_t tmp;
1233 	long r;
1234 
1235 	r = amdgpu_device_wb_get(adev, &index);
1236 	if (r)
1237 		return r;
1238 
1239 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1240 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1241 	memset(&ib, 0, sizeof(ib));
1242 
1243 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
1244 	if (r)
1245 		goto err1;
1246 
1247 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1248 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1249 	ib.ptr[2] = lower_32_bits(gpu_addr);
1250 	ib.ptr[3] = upper_32_bits(gpu_addr);
1251 	ib.ptr[4] = 0xDEADBEEF;
1252 	ib.length_dw = 5;
1253 
1254 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1255 	if (r)
1256 		goto err2;
1257 
1258 	r = dma_fence_wait_timeout(f, false, timeout);
1259 	if (r == 0) {
1260 		r = -ETIMEDOUT;
1261 		goto err2;
1262 	} else if (r < 0) {
1263 		goto err2;
1264 	}
1265 
1266 	tmp = adev->wb.wb[index];
1267 	if (tmp == 0xDEADBEEF)
1268 		r = 0;
1269 	else
1270 		r = -EINVAL;
1271 
1272 err2:
1273 	amdgpu_ib_free(&ib, NULL);
1274 	dma_fence_put(f);
1275 err1:
1276 	amdgpu_device_wb_free(adev, index);
1277 	return r;
1278 }
1279 
1280 
1281 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1282 {
1283 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
1284 	amdgpu_ucode_release(&adev->gfx.me_fw);
1285 	amdgpu_ucode_release(&adev->gfx.ce_fw);
1286 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
1287 	amdgpu_ucode_release(&adev->gfx.mec_fw);
1288 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
1289 
1290 	kfree(adev->gfx.rlc.register_list_format);
1291 }
1292 
1293 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1294 {
1295 	adev->gfx.me_fw_write_wait = false;
1296 	adev->gfx.mec_fw_write_wait = false;
1297 
1298 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1299 	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
1300 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
1301 	     (adev->gfx.mec_feature_version < 46) ||
1302 	     (adev->gfx.pfp_fw_version < 0x000000b7) ||
1303 	     (adev->gfx.pfp_feature_version < 46)))
1304 		DRM_WARN_ONCE("CP firmware version too old, please update!");
1305 
1306 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1307 	case IP_VERSION(9, 0, 1):
1308 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1309 		    (adev->gfx.me_feature_version >= 42) &&
1310 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1311 		    (adev->gfx.pfp_feature_version >= 42))
1312 			adev->gfx.me_fw_write_wait = true;
1313 
1314 		if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1315 		    (adev->gfx.mec_feature_version >= 42))
1316 			adev->gfx.mec_fw_write_wait = true;
1317 		break;
1318 	case IP_VERSION(9, 2, 1):
1319 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1320 		    (adev->gfx.me_feature_version >= 44) &&
1321 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1322 		    (adev->gfx.pfp_feature_version >= 44))
1323 			adev->gfx.me_fw_write_wait = true;
1324 
1325 		if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1326 		    (adev->gfx.mec_feature_version >= 44))
1327 			adev->gfx.mec_fw_write_wait = true;
1328 		break;
1329 	case IP_VERSION(9, 4, 0):
1330 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1331 		    (adev->gfx.me_feature_version >= 44) &&
1332 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1333 		    (adev->gfx.pfp_feature_version >= 44))
1334 			adev->gfx.me_fw_write_wait = true;
1335 
1336 		if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1337 		    (adev->gfx.mec_feature_version >= 44))
1338 			adev->gfx.mec_fw_write_wait = true;
1339 		break;
1340 	case IP_VERSION(9, 1, 0):
1341 	case IP_VERSION(9, 2, 2):
1342 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1343 		    (adev->gfx.me_feature_version >= 42) &&
1344 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1345 		    (adev->gfx.pfp_feature_version >= 42))
1346 			adev->gfx.me_fw_write_wait = true;
1347 
1348 		if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1349 		    (adev->gfx.mec_feature_version >= 42))
1350 			adev->gfx.mec_fw_write_wait = true;
1351 		break;
1352 	default:
1353 		adev->gfx.me_fw_write_wait = true;
1354 		adev->gfx.mec_fw_write_wait = true;
1355 		break;
1356 	}
1357 }
1358 
1359 struct amdgpu_gfxoff_quirk {
1360 	u16 chip_vendor;
1361 	u16 chip_device;
1362 	u16 subsys_vendor;
1363 	u16 subsys_device;
1364 	u8 revision;
1365 };
1366 
1367 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1368 	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1369 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1370 	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1371 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1372 	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1374 	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
1375 	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
1376 	/* https://bbs.openkylin.top/t/topic/171497 */
1377 	{ 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
1378 	/* HP 705G4 DM with R5 2400G */
1379 	{ 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
1380 	{ 0, 0, 0, 0, 0 },
1381 };
1382 
1383 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1384 {
1385 	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1386 
1387 	while (p && p->chip_device != 0) {
1388 		if (pdev->vendor == p->chip_vendor &&
1389 		    pdev->device == p->chip_device &&
1390 		    pdev->subsystem_vendor == p->subsys_vendor &&
1391 		    pdev->subsystem_device == p->subsys_device &&
1392 		    pdev->revision == p->revision) {
1393 			return true;
1394 		}
1395 		++p;
1396 	}
1397 	return false;
1398 }
1399 
1400 static bool is_raven_kicker(struct amdgpu_device *adev)
1401 {
1402 	if (adev->pm.fw_version >= 0x41e2b)
1403 		return true;
1404 	else
1405 		return false;
1406 }
1407 
1408 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1409 {
1410 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&
1411 	    (adev->gfx.me_fw_version >= 0x000000a5) &&
1412 	    (adev->gfx.me_feature_version >= 52))
1413 		return true;
1414 	else
1415 		return false;
1416 }
1417 
1418 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1419 {
1420 	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1421 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1422 
1423 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1424 	case IP_VERSION(9, 0, 1):
1425 	case IP_VERSION(9, 2, 1):
1426 	case IP_VERSION(9, 4, 0):
1427 		break;
1428 	case IP_VERSION(9, 2, 2):
1429 	case IP_VERSION(9, 1, 0):
1430 		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1431 		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1432 		    ((!is_raven_kicker(adev) &&
1433 		      adev->gfx.rlc_fw_version < 531) ||
1434 		     (adev->gfx.rlc_feature_version < 1) ||
1435 		     !adev->gfx.rlc.is_rlc_v2_1))
1436 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1437 
1438 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1439 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1440 				AMD_PG_SUPPORT_CP |
1441 				AMD_PG_SUPPORT_RLC_SMU_HS;
1442 		break;
1443 	case IP_VERSION(9, 3, 0):
1444 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1445 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1446 				AMD_PG_SUPPORT_CP |
1447 				AMD_PG_SUPPORT_RLC_SMU_HS;
1448 		break;
1449 	default:
1450 		break;
1451 	}
1452 }
1453 
1454 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1455 					  char *chip_name)
1456 {
1457 	int err;
1458 
1459 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
1460 				   AMDGPU_UCODE_REQUIRED,
1461 				   "amdgpu/%s_pfp.bin", chip_name);
1462 	if (err)
1463 		goto out;
1464 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
1465 
1466 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1467 				   AMDGPU_UCODE_REQUIRED,
1468 				   "amdgpu/%s_me.bin", chip_name);
1469 	if (err)
1470 		goto out;
1471 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
1472 
1473 	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1474 				   AMDGPU_UCODE_REQUIRED,
1475 				   "amdgpu/%s_ce.bin", chip_name);
1476 	if (err)
1477 		goto out;
1478 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
1479 
1480 out:
1481 	if (err) {
1482 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
1483 		amdgpu_ucode_release(&adev->gfx.me_fw);
1484 		amdgpu_ucode_release(&adev->gfx.ce_fw);
1485 	}
1486 	return err;
1487 }
1488 
1489 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1490 				       char *chip_name)
1491 {
1492 	int err;
1493 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1494 	uint16_t version_major;
1495 	uint16_t version_minor;
1496 	uint32_t smu_version;
1497 
1498 	/*
1499 	 * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
1500 	 * instead of picasso_rlc.bin.
1501 	 * Judgment method:
1502 	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1503 	 *          or revision >= 0xD8 && revision <= 0xDF
1504 	 * otherwise is PCO FP5
1505 	 */
1506 	if (!strcmp(chip_name, "picasso") &&
1507 		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1508 		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1509 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1510 					   AMDGPU_UCODE_REQUIRED,
1511 					   "amdgpu/%s_rlc_am4.bin", chip_name);
1512 	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1513 		(smu_version >= 0x41e2b))
1514 		/**
1515 		*SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
1516 		*/
1517 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1518 					   AMDGPU_UCODE_REQUIRED,
1519 					   "amdgpu/%s_kicker_rlc.bin", chip_name);
1520 	else
1521 		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1522 					   AMDGPU_UCODE_REQUIRED,
1523 					   "amdgpu/%s_rlc.bin", chip_name);
1524 	if (err)
1525 		goto out;
1526 
1527 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1528 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1529 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1530 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
1531 out:
1532 	if (err)
1533 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
1534 
1535 	return err;
1536 }
1537 
1538 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1539 {
1540 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
1541 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1542 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))
1543 		return false;
1544 
1545 	return true;
1546 }
1547 
1548 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1549 					      char *chip_name)
1550 {
1551 	int err;
1552 
1553 	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1554 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1555 				   AMDGPU_UCODE_REQUIRED,
1556 				   "amdgpu/%s_sjt_mec.bin", chip_name);
1557 	else
1558 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1559 					   AMDGPU_UCODE_REQUIRED,
1560 					   "amdgpu/%s_mec.bin", chip_name);
1561 	if (err)
1562 		goto out;
1563 
1564 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
1565 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
1566 
1567 	if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1568 		if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1569 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1570 						   AMDGPU_UCODE_REQUIRED,
1571 						   "amdgpu/%s_sjt_mec2.bin", chip_name);
1572 		else
1573 			err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1574 						   AMDGPU_UCODE_REQUIRED,
1575 						   "amdgpu/%s_mec2.bin", chip_name);
1576 		if (!err) {
1577 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
1578 			amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
1579 		} else {
1580 			err = 0;
1581 			amdgpu_ucode_release(&adev->gfx.mec2_fw);
1582 		}
1583 	} else {
1584 		adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1585 		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1586 	}
1587 
1588 	gfx_v9_0_check_if_need_gfxoff(adev);
1589 	gfx_v9_0_check_fw_write_wait(adev);
1590 
1591 out:
1592 	if (err)
1593 		amdgpu_ucode_release(&adev->gfx.mec_fw);
1594 	return err;
1595 }
1596 
1597 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1598 {
1599 	char ucode_prefix[30];
1600 	int r;
1601 
1602 	DRM_DEBUG("\n");
1603 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
1604 
1605 	/* No CPG in Arcturus */
1606 	if (adev->gfx.num_gfx_rings) {
1607 		r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
1608 		if (r)
1609 			return r;
1610 	}
1611 
1612 	r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
1613 	if (r)
1614 		return r;
1615 
1616 	r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
1617 	if (r)
1618 		return r;
1619 
1620 	return r;
1621 }
1622 
1623 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1624 {
1625 	u32 count = 0;
1626 	const struct cs_section_def *sect = NULL;
1627 	const struct cs_extent_def *ext = NULL;
1628 
1629 	/* begin clear state */
1630 	count += 2;
1631 	/* context control state */
1632 	count += 3;
1633 
1634 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1635 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1636 			if (sect->id == SECT_CONTEXT)
1637 				count += 2 + ext->reg_count;
1638 			else
1639 				return 0;
1640 		}
1641 	}
1642 
1643 	/* end clear state */
1644 	count += 2;
1645 	/* clear state */
1646 	count += 2;
1647 
1648 	return count;
1649 }
1650 
1651 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1652 				    volatile u32 *buffer)
1653 {
1654 	u32 count = 0, i;
1655 	const struct cs_section_def *sect = NULL;
1656 	const struct cs_extent_def *ext = NULL;
1657 
1658 	if (adev->gfx.rlc.cs_data == NULL)
1659 		return;
1660 	if (buffer == NULL)
1661 		return;
1662 
1663 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1664 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1665 
1666 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1667 	buffer[count++] = cpu_to_le32(0x80000000);
1668 	buffer[count++] = cpu_to_le32(0x80000000);
1669 
1670 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1671 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1672 			if (sect->id == SECT_CONTEXT) {
1673 				buffer[count++] =
1674 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1675 				buffer[count++] = cpu_to_le32(ext->reg_index -
1676 						PACKET3_SET_CONTEXT_REG_START);
1677 				for (i = 0; i < ext->reg_count; i++)
1678 					buffer[count++] = cpu_to_le32(ext->extent[i]);
1679 			}
1680 		}
1681 	}
1682 
1683 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1684 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1685 
1686 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1687 	buffer[count++] = cpu_to_le32(0);
1688 }
1689 
1690 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1691 {
1692 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1693 	uint32_t pg_always_on_cu_num = 2;
1694 	uint32_t always_on_cu_num;
1695 	uint32_t i, j, k;
1696 	uint32_t mask, cu_bitmap, counter;
1697 
1698 	if (adev->flags & AMD_IS_APU)
1699 		always_on_cu_num = 4;
1700 	else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))
1701 		always_on_cu_num = 8;
1702 	else
1703 		always_on_cu_num = 12;
1704 
1705 	mutex_lock(&adev->grbm_idx_mutex);
1706 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1707 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1708 			mask = 1;
1709 			cu_bitmap = 0;
1710 			counter = 0;
1711 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
1712 
1713 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
1714 				if (cu_info->bitmap[0][i][j] & mask) {
1715 					if (counter == pg_always_on_cu_num)
1716 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1717 					if (counter < always_on_cu_num)
1718 						cu_bitmap |= mask;
1719 					else
1720 						break;
1721 					counter++;
1722 				}
1723 				mask <<= 1;
1724 			}
1725 
1726 			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1727 			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1728 		}
1729 	}
1730 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1731 	mutex_unlock(&adev->grbm_idx_mutex);
1732 }
1733 
1734 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1735 {
1736 	uint32_t data;
1737 
1738 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1739 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1740 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1741 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1742 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1743 
1744 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1745 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1746 
1747 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1748 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1749 
1750 	mutex_lock(&adev->grbm_idx_mutex);
1751 	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1752 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1753 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1754 
1755 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1756 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1757 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1758 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1759 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1760 
1761 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1762 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1763 	data &= 0x0000FFFF;
1764 	data |= 0x00C00000;
1765 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1766 
1767 	/*
1768 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1769 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1770 	 */
1771 
1772 	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1773 	 * but used for RLC_LB_CNTL configuration */
1774 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1775 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1776 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1777 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1778 	mutex_unlock(&adev->grbm_idx_mutex);
1779 
1780 	gfx_v9_0_init_always_on_cu_mask(adev);
1781 }
1782 
1783 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1784 {
1785 	uint32_t data;
1786 
1787 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1788 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1789 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1790 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1791 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1792 
1793 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1794 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1795 
1796 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1797 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1798 
1799 	mutex_lock(&adev->grbm_idx_mutex);
1800 	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1801 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1802 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1803 
1804 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1805 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1806 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1807 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1808 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1809 
1810 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1811 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1812 	data &= 0x0000FFFF;
1813 	data |= 0x00C00000;
1814 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1815 
1816 	/*
1817 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1818 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1819 	 */
1820 
1821 	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1822 	 * but used for RLC_LB_CNTL configuration */
1823 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1824 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1825 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1826 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1827 	mutex_unlock(&adev->grbm_idx_mutex);
1828 
1829 	gfx_v9_0_init_always_on_cu_mask(adev);
1830 }
1831 
1832 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1833 {
1834 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1835 }
1836 
1837 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1838 {
1839 	if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1840 		return 5;
1841 	else
1842 		return 4;
1843 }
1844 
1845 static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1846 {
1847 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1848 
1849 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
1850 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1851 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
1852 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
1853 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
1854 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
1855 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
1856 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
1857 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1858 }
1859 
1860 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1861 {
1862 	const struct cs_section_def *cs_data;
1863 	int r;
1864 
1865 	adev->gfx.rlc.cs_data = gfx9_cs_data;
1866 
1867 	cs_data = adev->gfx.rlc.cs_data;
1868 
1869 	if (cs_data) {
1870 		/* init clear state block */
1871 		r = amdgpu_gfx_rlc_init_csb(adev);
1872 		if (r)
1873 			return r;
1874 	}
1875 
1876 	if (adev->flags & AMD_IS_APU) {
1877 		/* TODO: double check the cp_table_size for RV */
1878 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1879 		r = amdgpu_gfx_rlc_init_cpt(adev);
1880 		if (r)
1881 			return r;
1882 	}
1883 
1884 	return 0;
1885 }
1886 
1887 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1888 {
1889 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1890 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1891 }
1892 
1893 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1894 {
1895 	int r;
1896 	u32 *hpd;
1897 	const __le32 *fw_data;
1898 	unsigned fw_size;
1899 	u32 *fw;
1900 	size_t mec_hpd_size;
1901 
1902 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1903 
1904 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1905 
1906 	/* take ownership of the relevant compute queues */
1907 	amdgpu_gfx_compute_queue_acquire(adev);
1908 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1909 	if (mec_hpd_size) {
1910 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1911 					      AMDGPU_GEM_DOMAIN_VRAM |
1912 					      AMDGPU_GEM_DOMAIN_GTT,
1913 					      &adev->gfx.mec.hpd_eop_obj,
1914 					      &adev->gfx.mec.hpd_eop_gpu_addr,
1915 					      (void **)&hpd);
1916 		if (r) {
1917 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1918 			gfx_v9_0_mec_fini(adev);
1919 			return r;
1920 		}
1921 
1922 		memset(hpd, 0, mec_hpd_size);
1923 
1924 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1925 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1926 	}
1927 
1928 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1929 
1930 	fw_data = (const __le32 *)
1931 		(adev->gfx.mec_fw->data +
1932 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1933 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1934 
1935 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1936 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1937 				      &adev->gfx.mec.mec_fw_obj,
1938 				      &adev->gfx.mec.mec_fw_gpu_addr,
1939 				      (void **)&fw);
1940 	if (r) {
1941 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1942 		gfx_v9_0_mec_fini(adev);
1943 		return r;
1944 	}
1945 
1946 	memcpy(fw, fw_data, fw_size);
1947 
1948 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1949 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1950 
1951 	return 0;
1952 }
1953 
1954 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1955 {
1956 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1957 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1958 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1959 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
1960 		(SQ_IND_INDEX__FORCE_READ_MASK));
1961 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1962 }
1963 
1964 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1965 			   uint32_t wave, uint32_t thread,
1966 			   uint32_t regno, uint32_t num, uint32_t *out)
1967 {
1968 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1969 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1970 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1971 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
1972 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1973 		(SQ_IND_INDEX__FORCE_READ_MASK) |
1974 		(SQ_IND_INDEX__AUTO_INCR_MASK));
1975 	while (num--)
1976 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1977 }
1978 
1979 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1980 {
1981 	/* type 1 wave data */
1982 	dst[(*no_fields)++] = 1;
1983 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1984 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1985 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1986 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1987 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1988 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1989 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1990 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1991 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1992 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1993 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1994 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1995 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1996 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1997 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
1998 }
1999 
2000 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
2001 				     uint32_t wave, uint32_t start,
2002 				     uint32_t size, uint32_t *dst)
2003 {
2004 	wave_read_regs(
2005 		adev, simd, wave, 0,
2006 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2007 }
2008 
2009 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
2010 				     uint32_t wave, uint32_t thread,
2011 				     uint32_t start, uint32_t size,
2012 				     uint32_t *dst)
2013 {
2014 	wave_read_regs(
2015 		adev, simd, wave, thread,
2016 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2017 }
2018 
2019 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2020 				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
2021 {
2022 	soc15_grbm_select(adev, me, pipe, q, vm, 0);
2023 }
2024 
2025 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2026         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2027         .select_se_sh = &gfx_v9_0_select_se_sh,
2028         .read_wave_data = &gfx_v9_0_read_wave_data,
2029         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2030         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2031         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2032 };
2033 
2034 const struct amdgpu_ras_block_hw_ops  gfx_v9_0_ras_ops = {
2035 		.ras_error_inject = &gfx_v9_0_ras_error_inject,
2036 		.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2037 		.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2038 };
2039 
2040 static struct amdgpu_gfx_ras gfx_v9_0_ras = {
2041 	.ras_block = {
2042 		.hw_ops = &gfx_v9_0_ras_ops,
2043 	},
2044 };
2045 
2046 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2047 {
2048 	u32 gb_addr_config;
2049 	int err;
2050 
2051 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2052 	case IP_VERSION(9, 0, 1):
2053 		adev->gfx.config.max_hw_contexts = 8;
2054 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2055 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2056 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2057 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2058 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2059 		break;
2060 	case IP_VERSION(9, 2, 1):
2061 		adev->gfx.config.max_hw_contexts = 8;
2062 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2063 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2064 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2065 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2066 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2067 		DRM_INFO("fix gfx.config for vega12\n");
2068 		break;
2069 	case IP_VERSION(9, 4, 0):
2070 		adev->gfx.ras = &gfx_v9_0_ras;
2071 		adev->gfx.config.max_hw_contexts = 8;
2072 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2073 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2074 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2075 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2076 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2077 		gb_addr_config &= ~0xf3e777ff;
2078 		gb_addr_config |= 0x22014042;
2079 		/* check vbios table if gpu info is not available */
2080 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2081 		if (err)
2082 			return err;
2083 		break;
2084 	case IP_VERSION(9, 2, 2):
2085 	case IP_VERSION(9, 1, 0):
2086 		adev->gfx.config.max_hw_contexts = 8;
2087 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2088 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2089 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2090 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2091 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2092 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2093 		else
2094 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2095 		break;
2096 	case IP_VERSION(9, 4, 1):
2097 		adev->gfx.ras = &gfx_v9_4_ras;
2098 		adev->gfx.config.max_hw_contexts = 8;
2099 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2100 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2101 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2102 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2103 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2104 		gb_addr_config &= ~0xf3e777ff;
2105 		gb_addr_config |= 0x22014042;
2106 		break;
2107 	case IP_VERSION(9, 3, 0):
2108 		adev->gfx.config.max_hw_contexts = 8;
2109 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2110 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2111 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2112 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2113 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2114 		gb_addr_config &= ~0xf3e777ff;
2115 		gb_addr_config |= 0x22010042;
2116 		break;
2117 	case IP_VERSION(9, 4, 2):
2118 		adev->gfx.ras = &gfx_v9_4_2_ras;
2119 		adev->gfx.config.max_hw_contexts = 8;
2120 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2121 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2122 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2123 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2124 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2125 		gb_addr_config &= ~0xf3e777ff;
2126 		gb_addr_config |= 0x22014042;
2127 		/* check vbios table if gpu info is not available */
2128 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2129 		if (err)
2130 			return err;
2131 		break;
2132 	default:
2133 		BUG();
2134 		break;
2135 	}
2136 
2137 	adev->gfx.config.gb_addr_config = gb_addr_config;
2138 
2139 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2140 			REG_GET_FIELD(
2141 					adev->gfx.config.gb_addr_config,
2142 					GB_ADDR_CONFIG,
2143 					NUM_PIPES);
2144 
2145 	adev->gfx.config.max_tile_pipes =
2146 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2147 
2148 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2149 			REG_GET_FIELD(
2150 					adev->gfx.config.gb_addr_config,
2151 					GB_ADDR_CONFIG,
2152 					NUM_BANKS);
2153 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2154 			REG_GET_FIELD(
2155 					adev->gfx.config.gb_addr_config,
2156 					GB_ADDR_CONFIG,
2157 					MAX_COMPRESSED_FRAGS);
2158 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2159 			REG_GET_FIELD(
2160 					adev->gfx.config.gb_addr_config,
2161 					GB_ADDR_CONFIG,
2162 					NUM_RB_PER_SE);
2163 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2164 			REG_GET_FIELD(
2165 					adev->gfx.config.gb_addr_config,
2166 					GB_ADDR_CONFIG,
2167 					NUM_SHADER_ENGINES);
2168 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2169 			REG_GET_FIELD(
2170 					adev->gfx.config.gb_addr_config,
2171 					GB_ADDR_CONFIG,
2172 					PIPE_INTERLEAVE_SIZE));
2173 
2174 	return 0;
2175 }
2176 
2177 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2178 				      int mec, int pipe, int queue)
2179 {
2180 	unsigned irq_type;
2181 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2182 	unsigned int hw_prio;
2183 
2184 	ring = &adev->gfx.compute_ring[ring_id];
2185 
2186 	/* mec0 is me1 */
2187 	ring->me = mec + 1;
2188 	ring->pipe = pipe;
2189 	ring->queue = queue;
2190 
2191 	ring->ring_obj = NULL;
2192 	ring->use_doorbell = true;
2193 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2194 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2195 				+ (ring_id * GFX9_MEC_HPD_SIZE);
2196 	ring->vm_hub = AMDGPU_GFXHUB(0);
2197 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2198 
2199 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2200 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2201 		+ ring->pipe;
2202 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2203 			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
2204 	/* type-2 packets are deprecated on MEC, use type-3 instead */
2205 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2206 				hw_prio, NULL);
2207 }
2208 
2209 static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
2210 {
2211 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
2212 	uint32_t *ptr;
2213 	uint32_t inst;
2214 
2215 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
2216 	if (!ptr) {
2217 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
2218 		adev->gfx.ip_dump_core = NULL;
2219 	} else {
2220 		adev->gfx.ip_dump_core = ptr;
2221 	}
2222 
2223 	/* Allocate memory for compute queue registers for all the instances */
2224 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
2225 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
2226 		adev->gfx.mec.num_queue_per_pipe;
2227 
2228 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
2229 	if (!ptr) {
2230 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
2231 		adev->gfx.ip_dump_compute_queues = NULL;
2232 	} else {
2233 		adev->gfx.ip_dump_compute_queues = ptr;
2234 	}
2235 }
2236 
2237 static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
2238 {
2239 	int i, j, k, r, ring_id;
2240 	int xcc_id = 0;
2241 	struct amdgpu_ring *ring;
2242 	struct amdgpu_device *adev = ip_block->adev;
2243 	unsigned int hw_prio;
2244 
2245 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2246 	case IP_VERSION(9, 0, 1):
2247 	case IP_VERSION(9, 2, 1):
2248 	case IP_VERSION(9, 4, 0):
2249 	case IP_VERSION(9, 2, 2):
2250 	case IP_VERSION(9, 1, 0):
2251 	case IP_VERSION(9, 4, 1):
2252 	case IP_VERSION(9, 3, 0):
2253 	case IP_VERSION(9, 4, 2):
2254 		adev->gfx.mec.num_mec = 2;
2255 		break;
2256 	default:
2257 		adev->gfx.mec.num_mec = 1;
2258 		break;
2259 	}
2260 
2261 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2262 	case IP_VERSION(9, 4, 2):
2263 		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2264 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2265 		if (adev->gfx.mec_fw_version >= 88) {
2266 			adev->gfx.enable_cleaner_shader = true;
2267 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2268 			if (r) {
2269 				adev->gfx.enable_cleaner_shader = false;
2270 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2271 			}
2272 		}
2273 		break;
2274 	default:
2275 		adev->gfx.enable_cleaner_shader = false;
2276 		break;
2277 	}
2278 
2279 	adev->gfx.mec.num_pipe_per_mec = 4;
2280 	adev->gfx.mec.num_queue_per_pipe = 8;
2281 
2282 	/* EOP Event */
2283 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2284 	if (r)
2285 		return r;
2286 
2287 	/* Bad opcode Event */
2288 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
2289 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
2290 			      &adev->gfx.bad_op_irq);
2291 	if (r)
2292 		return r;
2293 
2294 	/* Privileged reg */
2295 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2296 			      &adev->gfx.priv_reg_irq);
2297 	if (r)
2298 		return r;
2299 
2300 	/* Privileged inst */
2301 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2302 			      &adev->gfx.priv_inst_irq);
2303 	if (r)
2304 		return r;
2305 
2306 	/* ECC error */
2307 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2308 			      &adev->gfx.cp_ecc_error_irq);
2309 	if (r)
2310 		return r;
2311 
2312 	/* FUE error */
2313 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2314 			      &adev->gfx.cp_ecc_error_irq);
2315 	if (r)
2316 		return r;
2317 
2318 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2319 
2320 	if (adev->gfx.rlc.funcs) {
2321 		if (adev->gfx.rlc.funcs->init) {
2322 			r = adev->gfx.rlc.funcs->init(adev);
2323 			if (r) {
2324 				dev_err(adev->dev, "Failed to init rlc BOs!\n");
2325 				return r;
2326 			}
2327 		}
2328 	}
2329 
2330 	r = gfx_v9_0_mec_init(adev);
2331 	if (r) {
2332 		DRM_ERROR("Failed to init MEC BOs!\n");
2333 		return r;
2334 	}
2335 
2336 	/* set up the gfx ring */
2337 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2338 		ring = &adev->gfx.gfx_ring[i];
2339 		ring->ring_obj = NULL;
2340 		if (!i)
2341 			sprintf(ring->name, "gfx");
2342 		else
2343 			sprintf(ring->name, "gfx_%d", i);
2344 		ring->use_doorbell = true;
2345 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2346 
2347 		/* disable scheduler on the real ring */
2348 		ring->no_scheduler = adev->gfx.mcbp;
2349 		ring->vm_hub = AMDGPU_GFXHUB(0);
2350 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2351 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2352 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
2353 		if (r)
2354 			return r;
2355 	}
2356 
2357 	/* set up the software rings */
2358 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2359 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2360 			ring = &adev->gfx.sw_gfx_ring[i];
2361 			ring->ring_obj = NULL;
2362 			sprintf(ring->name, amdgpu_sw_ring_name(i));
2363 			ring->use_doorbell = true;
2364 			ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2365 			ring->is_sw_ring = true;
2366 			hw_prio = amdgpu_sw_ring_priority(i);
2367 			ring->vm_hub = AMDGPU_GFXHUB(0);
2368 			r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2369 					     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
2370 					     NULL);
2371 			if (r)
2372 				return r;
2373 			ring->wptr = 0;
2374 		}
2375 
2376 		/* init the muxer and add software rings */
2377 		r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
2378 					 GFX9_NUM_SW_GFX_RINGS);
2379 		if (r) {
2380 			DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
2381 			return r;
2382 		}
2383 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2384 			r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
2385 							&adev->gfx.sw_gfx_ring[i]);
2386 			if (r) {
2387 				DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
2388 				return r;
2389 			}
2390 		}
2391 	}
2392 
2393 	/* set up the compute queues - allocate horizontally across pipes */
2394 	ring_id = 0;
2395 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2396 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2397 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2398 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2399 								     k, j))
2400 					continue;
2401 
2402 				r = gfx_v9_0_compute_ring_init(adev,
2403 							       ring_id,
2404 							       i, k, j);
2405 				if (r)
2406 					return r;
2407 
2408 				ring_id++;
2409 			}
2410 		}
2411 	}
2412 
2413 	/* TODO: Add queue reset mask when FW fully supports it */
2414 	adev->gfx.gfx_supported_reset =
2415 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
2416 	adev->gfx.compute_supported_reset =
2417 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
2418 
2419 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
2420 	if (r) {
2421 		DRM_ERROR("Failed to init KIQ BOs!\n");
2422 		return r;
2423 	}
2424 
2425 	r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2426 	if (r)
2427 		return r;
2428 
2429 	/* create MQD for all compute queues as wel as KIQ for SRIOV case */
2430 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
2431 	if (r)
2432 		return r;
2433 
2434 	adev->gfx.ce_ram_size = 0x8000;
2435 
2436 	r = gfx_v9_0_gpu_early_init(adev);
2437 	if (r)
2438 		return r;
2439 
2440 	if (amdgpu_gfx_ras_sw_init(adev)) {
2441 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
2442 		return -EINVAL;
2443 	}
2444 
2445 	gfx_v9_0_alloc_ip_dump(adev);
2446 
2447 	r = amdgpu_gfx_sysfs_init(adev);
2448 	if (r)
2449 		return r;
2450 
2451 	return 0;
2452 }
2453 
2454 
2455 static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2456 {
2457 	int i;
2458 	struct amdgpu_device *adev = ip_block->adev;
2459 
2460 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2461 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
2462 			amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
2463 		amdgpu_ring_mux_fini(&adev->gfx.muxer);
2464 	}
2465 
2466 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2467 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2468 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2469 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2470 
2471 	amdgpu_gfx_mqd_sw_fini(adev, 0);
2472 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2473 	amdgpu_gfx_kiq_fini(adev, 0);
2474 
2475 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
2476 
2477 	gfx_v9_0_mec_fini(adev);
2478 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2479 				&adev->gfx.rlc.clear_state_gpu_addr,
2480 				(void **)&adev->gfx.rlc.cs_ptr);
2481 	if (adev->flags & AMD_IS_APU) {
2482 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2483 				&adev->gfx.rlc.cp_table_gpu_addr,
2484 				(void **)&adev->gfx.rlc.cp_table_ptr);
2485 	}
2486 	gfx_v9_0_free_microcode(adev);
2487 
2488 	amdgpu_gfx_sysfs_fini(adev);
2489 
2490 	kfree(adev->gfx.ip_dump_core);
2491 	kfree(adev->gfx.ip_dump_compute_queues);
2492 
2493 	return 0;
2494 }
2495 
2496 
2497 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2498 {
2499 	/* TODO */
2500 }
2501 
2502 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2503 			   u32 instance, int xcc_id)
2504 {
2505 	u32 data;
2506 
2507 	if (instance == 0xffffffff)
2508 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2509 	else
2510 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2511 
2512 	if (se_num == 0xffffffff)
2513 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2514 	else
2515 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2516 
2517 	if (sh_num == 0xffffffff)
2518 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2519 	else
2520 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2521 
2522 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2523 }
2524 
2525 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2526 {
2527 	u32 data, mask;
2528 
2529 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2530 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2531 
2532 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2533 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2534 
2535 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2536 					 adev->gfx.config.max_sh_per_se);
2537 
2538 	return (~data) & mask;
2539 }
2540 
2541 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2542 {
2543 	int i, j;
2544 	u32 data;
2545 	u32 active_rbs = 0;
2546 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2547 					adev->gfx.config.max_sh_per_se;
2548 
2549 	mutex_lock(&adev->grbm_idx_mutex);
2550 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2551 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2552 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2553 			data = gfx_v9_0_get_rb_active_bitmap(adev);
2554 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2555 					       rb_bitmap_width_per_sh);
2556 		}
2557 	}
2558 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2559 	mutex_unlock(&adev->grbm_idx_mutex);
2560 
2561 	adev->gfx.config.backend_enable_mask = active_rbs;
2562 	adev->gfx.config.num_rbs = hweight32(active_rbs);
2563 }
2564 
2565 static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
2566 				uint32_t first_vmid,
2567 				uint32_t last_vmid)
2568 {
2569 	uint32_t data;
2570 	uint32_t trap_config_vmid_mask = 0;
2571 	int i;
2572 
2573 	/* Calculate trap config vmid mask */
2574 	for (i = first_vmid; i < last_vmid; i++)
2575 		trap_config_vmid_mask |= (1 << i);
2576 
2577 	data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
2578 			VMID_SEL, trap_config_vmid_mask);
2579 	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
2580 			TRAP_EN, 1);
2581 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
2582 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
2583 
2584 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
2585 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
2586 }
2587 
2588 #define DEFAULT_SH_MEM_BASES	(0x6000)
2589 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2590 {
2591 	int i;
2592 	uint32_t sh_mem_config;
2593 	uint32_t sh_mem_bases;
2594 
2595 	/*
2596 	 * Configure apertures:
2597 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2598 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2599 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2600 	 */
2601 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2602 
2603 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2604 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2605 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2606 
2607 	mutex_lock(&adev->srbm_mutex);
2608 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2609 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2610 		/* CP and shaders */
2611 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2612 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2613 	}
2614 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2615 	mutex_unlock(&adev->srbm_mutex);
2616 
2617 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
2618 	   access. These should be enabled by FW for target VMIDs. */
2619 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2620 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2621 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2622 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2623 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2624 	}
2625 }
2626 
2627 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2628 {
2629 	int vmid;
2630 
2631 	/*
2632 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2633 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2634 	 * the driver can enable them for graphics. VMID0 should maintain
2635 	 * access so that HWS firmware can save/restore entries.
2636 	 */
2637 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2638 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2639 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2640 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2641 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2642 	}
2643 }
2644 
2645 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2646 {
2647 	uint32_t tmp;
2648 
2649 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2650 	case IP_VERSION(9, 4, 1):
2651 		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2652 		tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
2653 				!READ_ONCE(adev->barrier_has_auto_waitcnt));
2654 		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2655 		break;
2656 	default:
2657 		break;
2658 	}
2659 }
2660 
2661 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2662 {
2663 	u32 tmp;
2664 	int i;
2665 
2666 	if (!amdgpu_sriov_vf(adev) ||
2667 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
2668 		WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2669 	}
2670 
2671 	gfx_v9_0_tiling_mode_table_init(adev);
2672 
2673 	if (adev->gfx.num_gfx_rings)
2674 		gfx_v9_0_setup_rb(adev);
2675 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2676 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2677 
2678 	/* XXX SH_MEM regs */
2679 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2680 	mutex_lock(&adev->srbm_mutex);
2681 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2682 		soc15_grbm_select(adev, 0, 0, 0, i, 0);
2683 		/* CP and shaders */
2684 		if (i == 0) {
2685 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2686 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2687 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2688 					    !!adev->gmc.noretry);
2689 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2690 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2691 		} else {
2692 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2693 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2694 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2695 					    !!adev->gmc.noretry);
2696 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2697 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2698 				(adev->gmc.private_aperture_start >> 48));
2699 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2700 				(adev->gmc.shared_aperture_start >> 48));
2701 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2702 		}
2703 	}
2704 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2705 
2706 	mutex_unlock(&adev->srbm_mutex);
2707 
2708 	gfx_v9_0_init_compute_vmid(adev);
2709 	gfx_v9_0_init_gds_vmid(adev);
2710 	gfx_v9_0_init_sq_config(adev);
2711 }
2712 
2713 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2714 {
2715 	u32 i, j, k;
2716 	u32 mask;
2717 
2718 	mutex_lock(&adev->grbm_idx_mutex);
2719 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2720 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2721 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2722 			for (k = 0; k < adev->usec_timeout; k++) {
2723 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2724 					break;
2725 				udelay(1);
2726 			}
2727 			if (k == adev->usec_timeout) {
2728 				amdgpu_gfx_select_se_sh(adev, 0xffffffff,
2729 						      0xffffffff, 0xffffffff, 0);
2730 				mutex_unlock(&adev->grbm_idx_mutex);
2731 				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2732 					 i, j);
2733 				return;
2734 			}
2735 		}
2736 	}
2737 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2738 	mutex_unlock(&adev->grbm_idx_mutex);
2739 
2740 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2741 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2742 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2743 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2744 	for (k = 0; k < adev->usec_timeout; k++) {
2745 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2746 			break;
2747 		udelay(1);
2748 	}
2749 }
2750 
2751 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2752 					       bool enable)
2753 {
2754 	u32 tmp;
2755 
2756 	/* These interrupts should be enabled to drive DS clock */
2757 
2758 	tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2759 
2760 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2761 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2762 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2763 	if (adev->gfx.num_gfx_rings)
2764 		tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2765 
2766 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2767 }
2768 
2769 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2770 {
2771 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2772 	/* csib */
2773 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2774 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2775 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2776 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2777 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2778 			adev->gfx.rlc.clear_state_size);
2779 }
2780 
2781 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2782 				int indirect_offset,
2783 				int list_size,
2784 				int *unique_indirect_regs,
2785 				int unique_indirect_reg_count,
2786 				int *indirect_start_offsets,
2787 				int *indirect_start_offsets_count,
2788 				int max_start_offsets_count)
2789 {
2790 	int idx;
2791 
2792 	for (; indirect_offset < list_size; indirect_offset++) {
2793 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2794 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2795 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2796 
2797 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2798 			indirect_offset += 2;
2799 
2800 			/* look for the matching indice */
2801 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2802 				if (unique_indirect_regs[idx] ==
2803 					register_list_format[indirect_offset] ||
2804 					!unique_indirect_regs[idx])
2805 					break;
2806 			}
2807 
2808 			BUG_ON(idx >= unique_indirect_reg_count);
2809 
2810 			if (!unique_indirect_regs[idx])
2811 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2812 
2813 			indirect_offset++;
2814 		}
2815 	}
2816 }
2817 
2818 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2819 {
2820 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2821 	int unique_indirect_reg_count = 0;
2822 
2823 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2824 	int indirect_start_offsets_count = 0;
2825 
2826 	int list_size = 0;
2827 	int i = 0, j = 0;
2828 	u32 tmp = 0;
2829 
2830 	u32 *register_list_format =
2831 		kmemdup(adev->gfx.rlc.register_list_format,
2832 			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2833 	if (!register_list_format)
2834 		return -ENOMEM;
2835 
2836 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2837 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2838 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2839 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2840 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2841 				    unique_indirect_regs,
2842 				    unique_indirect_reg_count,
2843 				    indirect_start_offsets,
2844 				    &indirect_start_offsets_count,
2845 				    ARRAY_SIZE(indirect_start_offsets));
2846 
2847 	/* enable auto inc in case it is disabled */
2848 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2849 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2850 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2851 
2852 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2853 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2854 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2855 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2856 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2857 			adev->gfx.rlc.register_restore[i]);
2858 
2859 	/* load indirect register */
2860 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2861 		adev->gfx.rlc.reg_list_format_start);
2862 
2863 	/* direct register portion */
2864 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2865 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2866 			register_list_format[i]);
2867 
2868 	/* indirect register portion */
2869 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2870 		if (register_list_format[i] == 0xFFFFFFFF) {
2871 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2872 			continue;
2873 		}
2874 
2875 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2876 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2877 
2878 		for (j = 0; j < unique_indirect_reg_count; j++) {
2879 			if (register_list_format[i] == unique_indirect_regs[j]) {
2880 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2881 				break;
2882 			}
2883 		}
2884 
2885 		BUG_ON(j >= unique_indirect_reg_count);
2886 
2887 		i++;
2888 	}
2889 
2890 	/* set save/restore list size */
2891 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2892 	list_size = list_size >> 1;
2893 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2894 		adev->gfx.rlc.reg_restore_list_size);
2895 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2896 
2897 	/* write the starting offsets to RLC scratch ram */
2898 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2899 		adev->gfx.rlc.starting_offsets_start);
2900 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2901 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2902 		       indirect_start_offsets[i]);
2903 
2904 	/* load unique indirect regs*/
2905 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2906 		if (unique_indirect_regs[i] != 0) {
2907 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2908 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2909 			       unique_indirect_regs[i] & 0x3FFFF);
2910 
2911 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2912 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2913 			       unique_indirect_regs[i] >> 20);
2914 		}
2915 	}
2916 
2917 	kfree(register_list_format);
2918 	return 0;
2919 }
2920 
2921 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2922 {
2923 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2924 }
2925 
2926 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2927 					     bool enable)
2928 {
2929 	uint32_t data = 0;
2930 	uint32_t default_data = 0;
2931 
2932 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2933 	if (enable) {
2934 		/* enable GFXIP control over CGPG */
2935 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2936 		if(default_data != data)
2937 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2938 
2939 		/* update status */
2940 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2941 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2942 		if(default_data != data)
2943 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2944 	} else {
2945 		/* restore GFXIP control over GCPG */
2946 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2947 		if(default_data != data)
2948 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2949 	}
2950 }
2951 
2952 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2953 {
2954 	uint32_t data = 0;
2955 
2956 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2957 			      AMD_PG_SUPPORT_GFX_SMG |
2958 			      AMD_PG_SUPPORT_GFX_DMG)) {
2959 		/* init IDLE_POLL_COUNT = 60 */
2960 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2961 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2962 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2963 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2964 
2965 		/* init RLC PG Delay */
2966 		data = 0;
2967 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2968 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2969 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2970 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2971 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2972 
2973 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2974 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2975 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2976 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2977 
2978 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2979 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2980 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2981 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2982 
2983 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2984 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2985 
2986 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2987 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2988 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2989 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))
2990 			pwr_10_0_gfxip_control_over_cgpg(adev, true);
2991 	}
2992 }
2993 
2994 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2995 						bool enable)
2996 {
2997 	uint32_t data = 0;
2998 	uint32_t default_data = 0;
2999 
3000 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3001 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3002 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
3003 			     enable ? 1 : 0);
3004 	if (default_data != data)
3005 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3006 }
3007 
3008 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
3009 						bool enable)
3010 {
3011 	uint32_t data = 0;
3012 	uint32_t default_data = 0;
3013 
3014 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3015 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3016 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
3017 			     enable ? 1 : 0);
3018 	if(default_data != data)
3019 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3020 }
3021 
3022 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
3023 					bool enable)
3024 {
3025 	uint32_t data = 0;
3026 	uint32_t default_data = 0;
3027 
3028 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3029 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3030 			     CP_PG_DISABLE,
3031 			     enable ? 0 : 1);
3032 	if(default_data != data)
3033 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3034 }
3035 
3036 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
3037 						bool enable)
3038 {
3039 	uint32_t data, default_data;
3040 
3041 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3042 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3043 			     GFX_POWER_GATING_ENABLE,
3044 			     enable ? 1 : 0);
3045 	if(default_data != data)
3046 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3047 }
3048 
3049 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
3050 						bool enable)
3051 {
3052 	uint32_t data, default_data;
3053 
3054 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3055 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3056 			     GFX_PIPELINE_PG_ENABLE,
3057 			     enable ? 1 : 0);
3058 	if(default_data != data)
3059 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3060 
3061 	if (!enable)
3062 		/* read any GFX register to wake up GFX */
3063 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3064 }
3065 
3066 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3067 						       bool enable)
3068 {
3069 	uint32_t data, default_data;
3070 
3071 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3072 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3073 			     STATIC_PER_CU_PG_ENABLE,
3074 			     enable ? 1 : 0);
3075 	if(default_data != data)
3076 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3077 }
3078 
3079 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3080 						bool enable)
3081 {
3082 	uint32_t data, default_data;
3083 
3084 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3085 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3086 			     DYN_PER_CU_PG_ENABLE,
3087 			     enable ? 1 : 0);
3088 	if(default_data != data)
3089 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3090 }
3091 
3092 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3093 {
3094 	gfx_v9_0_init_csb(adev);
3095 
3096 	/*
3097 	 * Rlc save restore list is workable since v2_1.
3098 	 * And it's needed by gfxoff feature.
3099 	 */
3100 	if (adev->gfx.rlc.is_rlc_v2_1) {
3101 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3102 			    IP_VERSION(9, 2, 1) ||
3103 		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
3104 			gfx_v9_1_init_rlc_save_restore_list(adev);
3105 		gfx_v9_0_enable_save_restore_machine(adev);
3106 	}
3107 
3108 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3109 			      AMD_PG_SUPPORT_GFX_SMG |
3110 			      AMD_PG_SUPPORT_GFX_DMG |
3111 			      AMD_PG_SUPPORT_CP |
3112 			      AMD_PG_SUPPORT_GDS |
3113 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3114 		WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
3115 			     adev->gfx.rlc.cp_table_gpu_addr >> 8);
3116 		gfx_v9_0_init_gfx_power_gating(adev);
3117 	}
3118 }
3119 
3120 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3121 {
3122 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3123 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3124 	gfx_v9_0_wait_for_rlc_serdes(adev);
3125 }
3126 
3127 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3128 {
3129 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3130 	udelay(50);
3131 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3132 	udelay(50);
3133 }
3134 
3135 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3136 {
3137 #ifdef AMDGPU_RLC_DEBUG_RETRY
3138 	u32 rlc_ucode_ver;
3139 #endif
3140 
3141 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3142 	udelay(50);
3143 
3144 	/* carrizo do enable cp interrupt after cp inited */
3145 	if (!(adev->flags & AMD_IS_APU)) {
3146 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3147 		udelay(50);
3148 	}
3149 
3150 #ifdef AMDGPU_RLC_DEBUG_RETRY
3151 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
3152 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3153 	if(rlc_ucode_ver == 0x108) {
3154 		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
3155 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
3156 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3157 		 * default is 0x9C4 to create a 100us interval */
3158 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3159 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3160 		 * to disable the page fault retry interrupts, default is
3161 		 * 0x100 (256) */
3162 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3163 	}
3164 #endif
3165 }
3166 
3167 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3168 {
3169 	const struct rlc_firmware_header_v2_0 *hdr;
3170 	const __le32 *fw_data;
3171 	unsigned i, fw_size;
3172 
3173 	if (!adev->gfx.rlc_fw)
3174 		return -EINVAL;
3175 
3176 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3177 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3178 
3179 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3180 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3181 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3182 
3183 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3184 			RLCG_UCODE_LOADING_START_ADDRESS);
3185 	for (i = 0; i < fw_size; i++)
3186 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3187 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3188 
3189 	return 0;
3190 }
3191 
3192 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3193 {
3194 	int r;
3195 
3196 	if (amdgpu_sriov_vf(adev)) {
3197 		gfx_v9_0_init_csb(adev);
3198 		return 0;
3199 	}
3200 
3201 	adev->gfx.rlc.funcs->stop(adev);
3202 
3203 	/* disable CG */
3204 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3205 
3206 	gfx_v9_0_init_pg(adev);
3207 
3208 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3209 		/* legacy rlc firmware loading */
3210 		r = gfx_v9_0_rlc_load_microcode(adev);
3211 		if (r)
3212 			return r;
3213 	}
3214 
3215 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3216 	case IP_VERSION(9, 2, 2):
3217 	case IP_VERSION(9, 1, 0):
3218 		gfx_v9_0_init_lbpw(adev);
3219 		if (amdgpu_lbpw == 0)
3220 			gfx_v9_0_enable_lbpw(adev, false);
3221 		else
3222 			gfx_v9_0_enable_lbpw(adev, true);
3223 		break;
3224 	case IP_VERSION(9, 4, 0):
3225 		gfx_v9_4_init_lbpw(adev);
3226 		if (amdgpu_lbpw > 0)
3227 			gfx_v9_0_enable_lbpw(adev, true);
3228 		else
3229 			gfx_v9_0_enable_lbpw(adev, false);
3230 		break;
3231 	default:
3232 		break;
3233 	}
3234 
3235 	gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
3236 
3237 	adev->gfx.rlc.funcs->start(adev);
3238 
3239 	return 0;
3240 }
3241 
3242 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3243 {
3244 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3245 
3246 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
3247 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
3248 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
3249 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
3250 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
3251 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
3252 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
3253 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
3254 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
3255 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3256 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3257 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3258 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3259 	udelay(50);
3260 }
3261 
3262 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3263 {
3264 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3265 	const struct gfx_firmware_header_v1_0 *ce_hdr;
3266 	const struct gfx_firmware_header_v1_0 *me_hdr;
3267 	const __le32 *fw_data;
3268 	unsigned i, fw_size;
3269 
3270 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3271 		return -EINVAL;
3272 
3273 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3274 		adev->gfx.pfp_fw->data;
3275 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3276 		adev->gfx.ce_fw->data;
3277 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3278 		adev->gfx.me_fw->data;
3279 
3280 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3281 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3282 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3283 
3284 	gfx_v9_0_cp_gfx_enable(adev, false);
3285 
3286 	/* PFP */
3287 	fw_data = (const __le32 *)
3288 		(adev->gfx.pfp_fw->data +
3289 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3290 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3291 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3292 	for (i = 0; i < fw_size; i++)
3293 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3294 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3295 
3296 	/* CE */
3297 	fw_data = (const __le32 *)
3298 		(adev->gfx.ce_fw->data +
3299 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3300 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3301 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3302 	for (i = 0; i < fw_size; i++)
3303 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3304 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3305 
3306 	/* ME */
3307 	fw_data = (const __le32 *)
3308 		(adev->gfx.me_fw->data +
3309 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3310 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3311 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3312 	for (i = 0; i < fw_size; i++)
3313 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3314 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3315 
3316 	return 0;
3317 }
3318 
3319 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3320 {
3321 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3322 	const struct cs_section_def *sect = NULL;
3323 	const struct cs_extent_def *ext = NULL;
3324 	int r, i, tmp;
3325 
3326 	/* init the CP */
3327 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3328 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3329 
3330 	gfx_v9_0_cp_gfx_enable(adev, true);
3331 
3332 	/* Now only limit the quirk on the APU gfx9 series and already
3333 	 * confirmed that the APU gfx10/gfx11 needn't such update.
3334 	 */
3335 	if (adev->flags & AMD_IS_APU &&
3336 			adev->in_s3 && !pm_resume_via_firmware()) {
3337 		DRM_INFO("Will skip the CSB packet resubmit\n");
3338 		return 0;
3339 	}
3340 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3341 	if (r) {
3342 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3343 		return r;
3344 	}
3345 
3346 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3347 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3348 
3349 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3350 	amdgpu_ring_write(ring, 0x80000000);
3351 	amdgpu_ring_write(ring, 0x80000000);
3352 
3353 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3354 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3355 			if (sect->id == SECT_CONTEXT) {
3356 				amdgpu_ring_write(ring,
3357 				       PACKET3(PACKET3_SET_CONTEXT_REG,
3358 					       ext->reg_count));
3359 				amdgpu_ring_write(ring,
3360 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3361 				for (i = 0; i < ext->reg_count; i++)
3362 					amdgpu_ring_write(ring, ext->extent[i]);
3363 			}
3364 		}
3365 	}
3366 
3367 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3368 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3369 
3370 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3371 	amdgpu_ring_write(ring, 0);
3372 
3373 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3374 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3375 	amdgpu_ring_write(ring, 0x8000);
3376 	amdgpu_ring_write(ring, 0x8000);
3377 
3378 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1));
3379 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3380 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3381 	amdgpu_ring_write(ring, tmp);
3382 	amdgpu_ring_write(ring, 0);
3383 
3384 	amdgpu_ring_commit(ring);
3385 
3386 	return 0;
3387 }
3388 
3389 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3390 {
3391 	struct amdgpu_ring *ring;
3392 	u32 tmp;
3393 	u32 rb_bufsz;
3394 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3395 
3396 	/* Set the write pointer delay */
3397 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3398 
3399 	/* set the RB to use vmid 0 */
3400 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3401 
3402 	/* Set ring buffer size */
3403 	ring = &adev->gfx.gfx_ring[0];
3404 	rb_bufsz = order_base_2(ring->ring_size / 8);
3405 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3406 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3407 #ifdef __BIG_ENDIAN
3408 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3409 #endif
3410 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3411 
3412 	/* Initialize the ring buffer's write pointers */
3413 	ring->wptr = 0;
3414 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3415 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3416 
3417 	/* set the wb address whether it's enabled or not */
3418 	rptr_addr = ring->rptr_gpu_addr;
3419 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3420 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3421 
3422 	wptr_gpu_addr = ring->wptr_gpu_addr;
3423 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3424 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3425 
3426 	mdelay(1);
3427 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3428 
3429 	rb_addr = ring->gpu_addr >> 8;
3430 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3431 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3432 
3433 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3434 	if (ring->use_doorbell) {
3435 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3436 				    DOORBELL_OFFSET, ring->doorbell_index);
3437 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3438 				    DOORBELL_EN, 1);
3439 	} else {
3440 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3441 	}
3442 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3443 
3444 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3445 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
3446 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3447 
3448 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3449 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3450 
3451 
3452 	/* start the ring */
3453 	gfx_v9_0_cp_gfx_start(adev);
3454 
3455 	return 0;
3456 }
3457 
3458 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3459 {
3460 	if (enable) {
3461 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3462 	} else {
3463 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3464 				 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
3465 				  CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
3466 				  CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
3467 				  CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
3468 				  CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
3469 				  CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
3470 				  CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
3471 				  CP_MEC_CNTL__MEC_ME1_HALT_MASK |
3472 				  CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3473 		adev->gfx.kiq[0].ring.sched.ready = false;
3474 	}
3475 	udelay(50);
3476 }
3477 
3478 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3479 {
3480 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3481 	const __le32 *fw_data;
3482 	unsigned i;
3483 	u32 tmp;
3484 
3485 	if (!adev->gfx.mec_fw)
3486 		return -EINVAL;
3487 
3488 	gfx_v9_0_cp_compute_enable(adev, false);
3489 
3490 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3491 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3492 
3493 	fw_data = (const __le32 *)
3494 		(adev->gfx.mec_fw->data +
3495 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3496 	tmp = 0;
3497 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3498 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3499 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3500 
3501 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3502 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3503 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3504 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3505 
3506 	/* MEC1 */
3507 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3508 			 mec_hdr->jt_offset);
3509 	for (i = 0; i < mec_hdr->jt_size; i++)
3510 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3511 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3512 
3513 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3514 			adev->gfx.mec_fw_version);
3515 	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3516 
3517 	return 0;
3518 }
3519 
3520 /* KIQ functions */
3521 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3522 {
3523 	uint32_t tmp;
3524 	struct amdgpu_device *adev = ring->adev;
3525 
3526 	/* tell RLC which is KIQ queue */
3527 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3528 	tmp &= 0xffffff00;
3529 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3530 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
3531 }
3532 
3533 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3534 {
3535 	struct amdgpu_device *adev = ring->adev;
3536 
3537 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3538 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3539 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3540 			mqd->cp_hqd_queue_priority =
3541 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3542 		}
3543 	}
3544 }
3545 
3546 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3547 {
3548 	struct amdgpu_device *adev = ring->adev;
3549 	struct v9_mqd *mqd = ring->mqd_ptr;
3550 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3551 	uint32_t tmp;
3552 
3553 	mqd->header = 0xC0310800;
3554 	mqd->compute_pipelinestat_enable = 0x00000001;
3555 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3556 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3557 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3558 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3559 	mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3560 	mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3561 	mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3562 	mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3563 	mqd->compute_misc_reserved = 0x00000003;
3564 
3565 	mqd->dynamic_cu_mask_addr_lo =
3566 		lower_32_bits(ring->mqd_gpu_addr
3567 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3568 	mqd->dynamic_cu_mask_addr_hi =
3569 		upper_32_bits(ring->mqd_gpu_addr
3570 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3571 
3572 	eop_base_addr = ring->eop_gpu_addr >> 8;
3573 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3574 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3575 
3576 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3577 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3578 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3579 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3580 
3581 	mqd->cp_hqd_eop_control = tmp;
3582 
3583 	/* enable doorbell? */
3584 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3585 
3586 	if (ring->use_doorbell) {
3587 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3588 				    DOORBELL_OFFSET, ring->doorbell_index);
3589 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3590 				    DOORBELL_EN, 1);
3591 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3592 				    DOORBELL_SOURCE, 0);
3593 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3594 				    DOORBELL_HIT, 0);
3595 	} else {
3596 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3597 					 DOORBELL_EN, 0);
3598 	}
3599 
3600 	mqd->cp_hqd_pq_doorbell_control = tmp;
3601 
3602 	/* disable the queue if it's active */
3603 	ring->wptr = 0;
3604 	mqd->cp_hqd_dequeue_request = 0;
3605 	mqd->cp_hqd_pq_rptr = 0;
3606 	mqd->cp_hqd_pq_wptr_lo = 0;
3607 	mqd->cp_hqd_pq_wptr_hi = 0;
3608 
3609 	/* set the pointer to the MQD */
3610 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3611 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3612 
3613 	/* set MQD vmid to 0 */
3614 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3615 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3616 	mqd->cp_mqd_control = tmp;
3617 
3618 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3619 	hqd_gpu_addr = ring->gpu_addr >> 8;
3620 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3621 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3622 
3623 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3624 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3625 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3626 			    (order_base_2(ring->ring_size / 4) - 1));
3627 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3628 			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3629 #ifdef __BIG_ENDIAN
3630 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3631 #endif
3632 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3633 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3634 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3635 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3636 	mqd->cp_hqd_pq_control = tmp;
3637 
3638 	/* set the wb address whether it's enabled or not */
3639 	wb_gpu_addr = ring->rptr_gpu_addr;
3640 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3641 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3642 		upper_32_bits(wb_gpu_addr) & 0xffff;
3643 
3644 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3645 	wb_gpu_addr = ring->wptr_gpu_addr;
3646 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3647 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3648 
3649 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3650 	ring->wptr = 0;
3651 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3652 
3653 	/* set the vmid for the queue */
3654 	mqd->cp_hqd_vmid = 0;
3655 
3656 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3657 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3658 	mqd->cp_hqd_persistent_state = tmp;
3659 
3660 	/* set MIN_IB_AVAIL_SIZE */
3661 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3662 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3663 	mqd->cp_hqd_ib_control = tmp;
3664 
3665 	/* set static priority for a queue/ring */
3666 	gfx_v9_0_mqd_set_priority(ring, mqd);
3667 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3668 
3669 	/* map_queues packet doesn't need activate the queue,
3670 	 * so only kiq need set this field.
3671 	 */
3672 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3673 		mqd->cp_hqd_active = 1;
3674 
3675 	return 0;
3676 }
3677 
3678 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3679 {
3680 	struct amdgpu_device *adev = ring->adev;
3681 	struct v9_mqd *mqd = ring->mqd_ptr;
3682 	int j;
3683 
3684 	/* disable wptr polling */
3685 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3686 
3687 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3688 	       mqd->cp_hqd_eop_base_addr_lo);
3689 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3690 	       mqd->cp_hqd_eop_base_addr_hi);
3691 
3692 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3693 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3694 	       mqd->cp_hqd_eop_control);
3695 
3696 	/* enable doorbell? */
3697 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3698 	       mqd->cp_hqd_pq_doorbell_control);
3699 
3700 	/* disable the queue if it's active */
3701 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3702 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3703 		for (j = 0; j < adev->usec_timeout; j++) {
3704 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3705 				break;
3706 			udelay(1);
3707 		}
3708 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3709 		       mqd->cp_hqd_dequeue_request);
3710 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3711 		       mqd->cp_hqd_pq_rptr);
3712 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3713 		       mqd->cp_hqd_pq_wptr_lo);
3714 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3715 		       mqd->cp_hqd_pq_wptr_hi);
3716 	}
3717 
3718 	/* set the pointer to the MQD */
3719 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3720 	       mqd->cp_mqd_base_addr_lo);
3721 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3722 	       mqd->cp_mqd_base_addr_hi);
3723 
3724 	/* set MQD vmid to 0 */
3725 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3726 	       mqd->cp_mqd_control);
3727 
3728 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3729 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3730 	       mqd->cp_hqd_pq_base_lo);
3731 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3732 	       mqd->cp_hqd_pq_base_hi);
3733 
3734 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3735 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3736 	       mqd->cp_hqd_pq_control);
3737 
3738 	/* set the wb address whether it's enabled or not */
3739 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3740 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3741 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3742 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3743 
3744 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3745 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3746 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3747 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3748 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3749 
3750 	/* enable the doorbell if requested */
3751 	if (ring->use_doorbell) {
3752 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3753 					(adev->doorbell_index.kiq * 2) << 2);
3754 		/* If GC has entered CGPG, ringing doorbell > first page
3755 		 * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
3756 		 * workaround this issue. And this change has to align with firmware
3757 		 * update.
3758 		 */
3759 		if (check_if_enlarge_doorbell_range(adev))
3760 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3761 					(adev->doorbell.size - 4));
3762 		else
3763 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3764 					(adev->doorbell_index.userqueue_end * 2) << 2);
3765 	}
3766 
3767 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3768 	       mqd->cp_hqd_pq_doorbell_control);
3769 
3770 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3771 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3772 	       mqd->cp_hqd_pq_wptr_lo);
3773 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3774 	       mqd->cp_hqd_pq_wptr_hi);
3775 
3776 	/* set the vmid for the queue */
3777 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3778 
3779 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3780 	       mqd->cp_hqd_persistent_state);
3781 
3782 	/* activate the queue */
3783 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3784 	       mqd->cp_hqd_active);
3785 
3786 	if (ring->use_doorbell)
3787 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3788 
3789 	return 0;
3790 }
3791 
3792 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3793 {
3794 	struct amdgpu_device *adev = ring->adev;
3795 	int j;
3796 
3797 	/* disable the queue if it's active */
3798 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3799 
3800 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3801 
3802 		for (j = 0; j < adev->usec_timeout; j++) {
3803 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3804 				break;
3805 			udelay(1);
3806 		}
3807 
3808 		if (j == AMDGPU_MAX_USEC_TIMEOUT) {
3809 			DRM_DEBUG("KIQ dequeue request failed.\n");
3810 
3811 			/* Manual disable if dequeue request times out */
3812 			WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3813 		}
3814 
3815 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3816 		      0);
3817 	}
3818 
3819 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3820 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3821 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3822 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3823 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3824 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3825 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3826 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3827 
3828 	return 0;
3829 }
3830 
3831 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3832 {
3833 	struct amdgpu_device *adev = ring->adev;
3834 	struct v9_mqd *mqd = ring->mqd_ptr;
3835 	struct v9_mqd *tmp_mqd;
3836 
3837 	gfx_v9_0_kiq_setting(ring);
3838 
3839 	/* GPU could be in bad state during probe, driver trigger the reset
3840 	 * after load the SMU, in this case , the mqd is not be initialized.
3841 	 * driver need to re-init the mqd.
3842 	 * check mqd->cp_hqd_pq_control since this value should not be 0
3843 	 */
3844 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
3845 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){
3846 		/* for GPU_RESET case , reset MQD to a clean status */
3847 		if (adev->gfx.kiq[0].mqd_backup)
3848 			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
3849 
3850 		/* reset ring buffer */
3851 		ring->wptr = 0;
3852 		amdgpu_ring_clear_ring(ring);
3853 
3854 		mutex_lock(&adev->srbm_mutex);
3855 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3856 		gfx_v9_0_kiq_init_register(ring);
3857 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3858 		mutex_unlock(&adev->srbm_mutex);
3859 	} else {
3860 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3861 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3862 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3863 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3864 			amdgpu_ring_clear_ring(ring);
3865 		mutex_lock(&adev->srbm_mutex);
3866 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3867 		gfx_v9_0_mqd_init(ring);
3868 		gfx_v9_0_kiq_init_register(ring);
3869 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3870 		mutex_unlock(&adev->srbm_mutex);
3871 
3872 		if (adev->gfx.kiq[0].mqd_backup)
3873 			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
3874 	}
3875 
3876 	return 0;
3877 }
3878 
3879 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
3880 {
3881 	struct amdgpu_device *adev = ring->adev;
3882 	struct v9_mqd *mqd = ring->mqd_ptr;
3883 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3884 	struct v9_mqd *tmp_mqd;
3885 
3886 	/* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control
3887 	 * is not be initialized before
3888 	 */
3889 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3890 
3891 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
3892 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
3893 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3894 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3895 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3896 		mutex_lock(&adev->srbm_mutex);
3897 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3898 		gfx_v9_0_mqd_init(ring);
3899 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3900 		mutex_unlock(&adev->srbm_mutex);
3901 
3902 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3903 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3904 	} else {
3905 		/* restore MQD to a clean status */
3906 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3907 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3908 		/* reset ring buffer */
3909 		ring->wptr = 0;
3910 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3911 		amdgpu_ring_clear_ring(ring);
3912 	}
3913 
3914 	return 0;
3915 }
3916 
3917 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3918 {
3919 	gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
3920 	return 0;
3921 }
3922 
3923 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3924 {
3925 	int i, r;
3926 
3927 	gfx_v9_0_cp_compute_enable(adev, true);
3928 
3929 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3930 		r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
3931 		if (r)
3932 			return r;
3933 	}
3934 
3935 	return amdgpu_gfx_enable_kcq(adev, 0);
3936 }
3937 
3938 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3939 {
3940 	int r, i;
3941 	struct amdgpu_ring *ring;
3942 
3943 	if (!(adev->flags & AMD_IS_APU))
3944 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3945 
3946 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3947 		if (adev->gfx.num_gfx_rings) {
3948 			/* legacy firmware loading */
3949 			r = gfx_v9_0_cp_gfx_load_microcode(adev);
3950 			if (r)
3951 				return r;
3952 		}
3953 
3954 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3955 		if (r)
3956 			return r;
3957 	}
3958 
3959 	if (adev->gfx.num_gfx_rings)
3960 		gfx_v9_0_cp_gfx_enable(adev, false);
3961 	gfx_v9_0_cp_compute_enable(adev, false);
3962 
3963 	r = gfx_v9_0_kiq_resume(adev);
3964 	if (r)
3965 		return r;
3966 
3967 	if (adev->gfx.num_gfx_rings) {
3968 		r = gfx_v9_0_cp_gfx_resume(adev);
3969 		if (r)
3970 			return r;
3971 	}
3972 
3973 	r = gfx_v9_0_kcq_resume(adev);
3974 	if (r)
3975 		return r;
3976 
3977 	if (adev->gfx.num_gfx_rings) {
3978 		ring = &adev->gfx.gfx_ring[0];
3979 		r = amdgpu_ring_test_helper(ring);
3980 		if (r)
3981 			return r;
3982 	}
3983 
3984 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3985 		ring = &adev->gfx.compute_ring[i];
3986 		amdgpu_ring_test_helper(ring);
3987 	}
3988 
3989 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3990 
3991 	return 0;
3992 }
3993 
3994 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3995 {
3996 	u32 tmp;
3997 
3998 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&
3999 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
4000 		return;
4001 
4002 	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
4003 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
4004 				adev->df.hash_status.hash_64k);
4005 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
4006 				adev->df.hash_status.hash_2m);
4007 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
4008 				adev->df.hash_status.hash_1g);
4009 	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
4010 }
4011 
4012 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
4013 {
4014 	if (adev->gfx.num_gfx_rings)
4015 		gfx_v9_0_cp_gfx_enable(adev, enable);
4016 	gfx_v9_0_cp_compute_enable(adev, enable);
4017 }
4018 
4019 static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
4020 {
4021 	int r;
4022 	struct amdgpu_device *adev = ip_block->adev;
4023 
4024 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4025 				       adev->gfx.cleaner_shader_ptr);
4026 
4027 	if (!amdgpu_sriov_vf(adev))
4028 		gfx_v9_0_init_golden_registers(adev);
4029 
4030 	gfx_v9_0_constants_init(adev);
4031 
4032 	gfx_v9_0_init_tcp_config(adev);
4033 
4034 	r = adev->gfx.rlc.funcs->resume(adev);
4035 	if (r)
4036 		return r;
4037 
4038 	r = gfx_v9_0_cp_resume(adev);
4039 	if (r)
4040 		return r;
4041 
4042 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
4043 	    !amdgpu_sriov_vf(adev))
4044 		gfx_v9_4_2_set_power_brake_sequence(adev);
4045 
4046 	return r;
4047 }
4048 
4049 static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
4050 {
4051 	struct amdgpu_device *adev = ip_block->adev;
4052 
4053 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4054 		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4055 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4056 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4057 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4058 
4059 	/* DF freeze and kcq disable will fail */
4060 	if (!amdgpu_ras_intr_triggered())
4061 		/* disable KCQ to avoid CPC touch memory not valid anymore */
4062 		amdgpu_gfx_disable_kcq(adev, 0);
4063 
4064 	if (amdgpu_sriov_vf(adev)) {
4065 		gfx_v9_0_cp_gfx_enable(adev, false);
4066 		/* must disable polling for SRIOV when hw finished, otherwise
4067 		 * CPC engine may still keep fetching WB address which is already
4068 		 * invalid after sw finished and trigger DMAR reading error in
4069 		 * hypervisor side.
4070 		 */
4071 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4072 		return 0;
4073 	}
4074 
4075 	/* Use deinitialize sequence from CAIL when unbinding device from driver,
4076 	 * otherwise KIQ is hanging when binding back
4077 	 */
4078 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4079 		mutex_lock(&adev->srbm_mutex);
4080 		soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
4081 				adev->gfx.kiq[0].ring.pipe,
4082 				adev->gfx.kiq[0].ring.queue, 0, 0);
4083 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
4084 		soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4085 		mutex_unlock(&adev->srbm_mutex);
4086 	}
4087 
4088 	gfx_v9_0_cp_enable(adev, false);
4089 
4090 	/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
4091 	if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
4092 	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {
4093 		dev_dbg(adev->dev, "Skipping RLC halt\n");
4094 		return 0;
4095 	}
4096 
4097 	adev->gfx.rlc.funcs->stop(adev);
4098 	return 0;
4099 }
4100 
4101 static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
4102 {
4103 	return gfx_v9_0_hw_fini(ip_block);
4104 }
4105 
4106 static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
4107 {
4108 	return gfx_v9_0_hw_init(ip_block);
4109 }
4110 
4111 static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
4112 {
4113 	struct amdgpu_device *adev = ip_block->adev;
4114 
4115 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4116 				GRBM_STATUS, GUI_ACTIVE))
4117 		return false;
4118 	else
4119 		return true;
4120 }
4121 
4122 static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4123 {
4124 	unsigned i;
4125 	struct amdgpu_device *adev = ip_block->adev;
4126 
4127 	for (i = 0; i < adev->usec_timeout; i++) {
4128 		if (gfx_v9_0_is_idle(ip_block))
4129 			return 0;
4130 		udelay(1);
4131 	}
4132 	return -ETIMEDOUT;
4133 }
4134 
4135 static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
4136 {
4137 	u32 grbm_soft_reset = 0;
4138 	u32 tmp;
4139 	struct amdgpu_device *adev = ip_block->adev;
4140 
4141 	/* GRBM_STATUS */
4142 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4143 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4144 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4145 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4146 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4147 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4148 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4149 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4150 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4151 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4152 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4153 	}
4154 
4155 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4156 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4157 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4158 	}
4159 
4160 	/* GRBM_STATUS2 */
4161 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4162 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4163 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4164 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4165 
4166 
4167 	if (grbm_soft_reset) {
4168 		/* stop the rlc */
4169 		adev->gfx.rlc.funcs->stop(adev);
4170 
4171 		if (adev->gfx.num_gfx_rings)
4172 			/* Disable GFX parsing/prefetching */
4173 			gfx_v9_0_cp_gfx_enable(adev, false);
4174 
4175 		/* Disable MEC parsing/prefetching */
4176 		gfx_v9_0_cp_compute_enable(adev, false);
4177 
4178 		if (grbm_soft_reset) {
4179 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4180 			tmp |= grbm_soft_reset;
4181 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4182 			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4183 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4184 
4185 			udelay(50);
4186 
4187 			tmp &= ~grbm_soft_reset;
4188 			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4189 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4190 		}
4191 
4192 		/* Wait a little for things to settle down */
4193 		udelay(50);
4194 	}
4195 	return 0;
4196 }
4197 
4198 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4199 {
4200 	signed long r, cnt = 0;
4201 	unsigned long flags;
4202 	uint32_t seq, reg_val_offs = 0;
4203 	uint64_t value = 0;
4204 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4205 	struct amdgpu_ring *ring = &kiq->ring;
4206 
4207 	BUG_ON(!ring->funcs->emit_rreg);
4208 
4209 	spin_lock_irqsave(&kiq->ring_lock, flags);
4210 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4211 		pr_err("critical bug! too many kiq readers\n");
4212 		goto failed_unlock;
4213 	}
4214 	amdgpu_ring_alloc(ring, 32);
4215 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4216 	amdgpu_ring_write(ring, 9 |	/* src: register*/
4217 				(5 << 8) |	/* dst: memory */
4218 				(1 << 16) |	/* count sel */
4219 				(1 << 20));	/* write confirm */
4220 	amdgpu_ring_write(ring, 0);
4221 	amdgpu_ring_write(ring, 0);
4222 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4223 				reg_val_offs * 4));
4224 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4225 				reg_val_offs * 4));
4226 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4227 	if (r)
4228 		goto failed_undo;
4229 
4230 	amdgpu_ring_commit(ring);
4231 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4232 
4233 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4234 
4235 	/* don't wait anymore for gpu reset case because this way may
4236 	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
4237 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
4238 	 * never return if we keep waiting in virt_kiq_rreg, which cause
4239 	 * gpu_recover() hang there.
4240 	 *
4241 	 * also don't wait anymore for IRQ context
4242 	 * */
4243 	if (r < 1 && (amdgpu_in_reset(adev)))
4244 		goto failed_kiq_read;
4245 
4246 	might_sleep();
4247 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4248 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4249 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4250 	}
4251 
4252 	if (cnt > MAX_KIQ_REG_TRY)
4253 		goto failed_kiq_read;
4254 
4255 	mb();
4256 	value = (uint64_t)adev->wb.wb[reg_val_offs] |
4257 		(uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
4258 	amdgpu_device_wb_free(adev, reg_val_offs);
4259 	return value;
4260 
4261 failed_undo:
4262 	amdgpu_ring_undo(ring);
4263 failed_unlock:
4264 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4265 failed_kiq_read:
4266 	if (reg_val_offs)
4267 		amdgpu_device_wb_free(adev, reg_val_offs);
4268 	pr_err("failed to read gpu clock\n");
4269 	return ~0;
4270 }
4271 
4272 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4273 {
4274 	uint64_t clock, clock_lo, clock_hi, hi_check;
4275 
4276 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4277 	case IP_VERSION(9, 3, 0):
4278 		preempt_disable();
4279 		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4280 		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4281 		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4282 		/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
4283 		 * roughly every 42 seconds.
4284 		 */
4285 		if (hi_check != clock_hi) {
4286 			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4287 			clock_hi = hi_check;
4288 		}
4289 		preempt_enable();
4290 		clock = clock_lo | (clock_hi << 32ULL);
4291 		break;
4292 	default:
4293 		amdgpu_gfx_off_ctrl(adev, false);
4294 		mutex_lock(&adev->gfx.gpu_clock_mutex);
4295 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
4296 			    IP_VERSION(9, 0, 1) &&
4297 		    amdgpu_sriov_runtime(adev)) {
4298 			clock = gfx_v9_0_kiq_read_clock(adev);
4299 		} else {
4300 			WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4301 			clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4302 				((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4303 		}
4304 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
4305 		amdgpu_gfx_off_ctrl(adev, true);
4306 		break;
4307 	}
4308 	return clock;
4309 }
4310 
4311 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4312 					  uint32_t vmid,
4313 					  uint32_t gds_base, uint32_t gds_size,
4314 					  uint32_t gws_base, uint32_t gws_size,
4315 					  uint32_t oa_base, uint32_t oa_size)
4316 {
4317 	struct amdgpu_device *adev = ring->adev;
4318 
4319 	/* GDS Base */
4320 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4321 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4322 				   gds_base);
4323 
4324 	/* GDS Size */
4325 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4326 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4327 				   gds_size);
4328 
4329 	/* GWS */
4330 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4331 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4332 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4333 
4334 	/* OA */
4335 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4336 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4337 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
4338 }
4339 
4340 static const u32 vgpr_init_compute_shader[] =
4341 {
4342 	0xb07c0000, 0xbe8000ff,
4343 	0x000000f8, 0xbf110800,
4344 	0x7e000280, 0x7e020280,
4345 	0x7e040280, 0x7e060280,
4346 	0x7e080280, 0x7e0a0280,
4347 	0x7e0c0280, 0x7e0e0280,
4348 	0x80808800, 0xbe803200,
4349 	0xbf84fff5, 0xbf9c0000,
4350 	0xd28c0001, 0x0001007f,
4351 	0xd28d0001, 0x0002027e,
4352 	0x10020288, 0xb8810904,
4353 	0xb7814000, 0xd1196a01,
4354 	0x00000301, 0xbe800087,
4355 	0xbefc00c1, 0xd89c4000,
4356 	0x00020201, 0xd89cc080,
4357 	0x00040401, 0x320202ff,
4358 	0x00000800, 0x80808100,
4359 	0xbf84fff8, 0x7e020280,
4360 	0xbf810000, 0x00000000,
4361 };
4362 
4363 static const u32 sgpr_init_compute_shader[] =
4364 {
4365 	0xb07c0000, 0xbe8000ff,
4366 	0x0000005f, 0xbee50080,
4367 	0xbe812c65, 0xbe822c65,
4368 	0xbe832c65, 0xbe842c65,
4369 	0xbe852c65, 0xb77c0005,
4370 	0x80808500, 0xbf84fff8,
4371 	0xbe800080, 0xbf810000,
4372 };
4373 
4374 static const u32 vgpr_init_compute_shader_arcturus[] = {
4375 	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4376 	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4377 	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4378 	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4379 	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4380 	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4381 	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4382 	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4383 	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4384 	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4385 	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4386 	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4387 	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4388 	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4389 	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4390 	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4391 	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4392 	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4393 	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4394 	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4395 	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4396 	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4397 	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4398 	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4399 	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4400 	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4401 	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4402 	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4403 	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4404 	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4405 	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4406 	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4407 	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4408 	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4409 	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4410 	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4411 	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4412 	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4413 	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4414 	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4415 	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4416 	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4417 	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4418 	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4419 	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4420 	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4421 	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4422 	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4423 	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4424 	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4425 	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4426 	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4427 	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4428 	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4429 	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4430 	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4431 	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4432 	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4433 	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4434 	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4435 	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4436 	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4437 	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4438 	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4439 	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4440 	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4441 	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4442 	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4443 	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4444 	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4445 	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4446 	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4447 	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4448 	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4449 	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4450 	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4451 	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4452 	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4453 	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4454 	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4455 	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4456 	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4457 	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4458 	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4459 	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4460 	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4461 	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4462 	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4463 	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4464 	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4465 	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4466 	0xbf84fff8, 0xbf810000,
4467 };
4468 
4469 /* When below register arrays changed, please update gpr_reg_size,
4470   and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
4471   to cover all gfx9 ASICs */
4472 static const struct soc15_reg_entry vgpr_init_regs[] = {
4473    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4474    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4475    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4476    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4477    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4478    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4479    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4480    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4481    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4482    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4483    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4484    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4485    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4486    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4487 };
4488 
4489 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4490    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4491    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4492    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4493    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4494    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4495    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4496    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4497    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4498    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4499    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4500    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4501    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4502    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4503    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4504 };
4505 
4506 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4507    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4508    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4509    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4510    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4511    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4512    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4513    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4514    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4515    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4516    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4517    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4518    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4519    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4520    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4521 };
4522 
4523 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4524    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4525    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4526    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4527    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4528    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4529    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4530    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4531    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4532    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4533    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4534    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4535    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4536    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4537    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4538 };
4539 
4540 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4541    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4542    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4543    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4544    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4545    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4546    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4547    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4548    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4549    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4550    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4551    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4552    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4553    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4554    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4555    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4556    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4557    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4558    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4559    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4560    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4561    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4562    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4563    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4564    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4565    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4566    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4567    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4568    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4569    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4570    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4571    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4572    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4573    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4574 };
4575 
4576 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4577 {
4578 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4579 	int i, r;
4580 
4581 	/* only support when RAS is enabled */
4582 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4583 		return 0;
4584 
4585 	r = amdgpu_ring_alloc(ring, 7);
4586 	if (r) {
4587 		DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4588 			ring->name, r);
4589 		return r;
4590 	}
4591 
4592 	WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4593 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4594 
4595 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4596 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4597 				PACKET3_DMA_DATA_DST_SEL(1) |
4598 				PACKET3_DMA_DATA_SRC_SEL(2) |
4599 				PACKET3_DMA_DATA_ENGINE(0)));
4600 	amdgpu_ring_write(ring, 0);
4601 	amdgpu_ring_write(ring, 0);
4602 	amdgpu_ring_write(ring, 0);
4603 	amdgpu_ring_write(ring, 0);
4604 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4605 				adev->gds.gds_size);
4606 
4607 	amdgpu_ring_commit(ring);
4608 
4609 	for (i = 0; i < adev->usec_timeout; i++) {
4610 		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4611 			break;
4612 		udelay(1);
4613 	}
4614 
4615 	if (i >= adev->usec_timeout)
4616 		r = -ETIMEDOUT;
4617 
4618 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4619 
4620 	return r;
4621 }
4622 
4623 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4624 {
4625 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4626 	struct amdgpu_ib ib;
4627 	struct dma_fence *f = NULL;
4628 	int r, i;
4629 	unsigned total_size, vgpr_offset, sgpr_offset;
4630 	u64 gpu_addr;
4631 
4632 	int compute_dim_x = adev->gfx.config.max_shader_engines *
4633 						adev->gfx.config.max_cu_per_sh *
4634 						adev->gfx.config.max_sh_per_se;
4635 	int sgpr_work_group_size = 5;
4636 	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4637 	int vgpr_init_shader_size;
4638 	const u32 *vgpr_init_shader_ptr;
4639 	const struct soc15_reg_entry *vgpr_init_regs_ptr;
4640 
4641 	/* only support when RAS is enabled */
4642 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4643 		return 0;
4644 
4645 	/* bail if the compute ring is not ready */
4646 	if (!ring->sched.ready)
4647 		return 0;
4648 
4649 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
4650 		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4651 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4652 		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4653 	} else {
4654 		vgpr_init_shader_ptr = vgpr_init_compute_shader;
4655 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4656 		vgpr_init_regs_ptr = vgpr_init_regs;
4657 	}
4658 
4659 	total_size =
4660 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4661 	total_size +=
4662 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4663 	total_size +=
4664 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4665 	total_size = ALIGN(total_size, 256);
4666 	vgpr_offset = total_size;
4667 	total_size += ALIGN(vgpr_init_shader_size, 256);
4668 	sgpr_offset = total_size;
4669 	total_size += sizeof(sgpr_init_compute_shader);
4670 
4671 	/* allocate an indirect buffer to put the commands in */
4672 	memset(&ib, 0, sizeof(ib));
4673 	r = amdgpu_ib_get(adev, NULL, total_size,
4674 					AMDGPU_IB_POOL_DIRECT, &ib);
4675 	if (r) {
4676 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4677 		return r;
4678 	}
4679 
4680 	/* load the compute shaders */
4681 	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4682 		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4683 
4684 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4685 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4686 
4687 	/* init the ib length to 0 */
4688 	ib.length_dw = 0;
4689 
4690 	/* VGPR */
4691 	/* write the register state for the compute dispatch */
4692 	for (i = 0; i < gpr_reg_size; i++) {
4693 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4694 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4695 								- PACKET3_SET_SH_REG_START;
4696 		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4697 	}
4698 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4699 	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4700 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4701 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4702 							- PACKET3_SET_SH_REG_START;
4703 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4704 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4705 
4706 	/* write dispatch packet */
4707 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4708 	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4709 	ib.ptr[ib.length_dw++] = 1; /* y */
4710 	ib.ptr[ib.length_dw++] = 1; /* z */
4711 	ib.ptr[ib.length_dw++] =
4712 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4713 
4714 	/* write CS partial flush packet */
4715 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4716 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4717 
4718 	/* SGPR1 */
4719 	/* write the register state for the compute dispatch */
4720 	for (i = 0; i < gpr_reg_size; i++) {
4721 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4722 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4723 								- PACKET3_SET_SH_REG_START;
4724 		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4725 	}
4726 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4727 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4728 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4729 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4730 							- PACKET3_SET_SH_REG_START;
4731 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4732 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4733 
4734 	/* write dispatch packet */
4735 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4736 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4737 	ib.ptr[ib.length_dw++] = 1; /* y */
4738 	ib.ptr[ib.length_dw++] = 1; /* z */
4739 	ib.ptr[ib.length_dw++] =
4740 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4741 
4742 	/* write CS partial flush packet */
4743 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4744 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4745 
4746 	/* SGPR2 */
4747 	/* write the register state for the compute dispatch */
4748 	for (i = 0; i < gpr_reg_size; i++) {
4749 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4750 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4751 								- PACKET3_SET_SH_REG_START;
4752 		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4753 	}
4754 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4755 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4756 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4757 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4758 							- PACKET3_SET_SH_REG_START;
4759 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4760 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4761 
4762 	/* write dispatch packet */
4763 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4764 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4765 	ib.ptr[ib.length_dw++] = 1; /* y */
4766 	ib.ptr[ib.length_dw++] = 1; /* z */
4767 	ib.ptr[ib.length_dw++] =
4768 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4769 
4770 	/* write CS partial flush packet */
4771 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4772 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4773 
4774 	/* shedule the ib on the ring */
4775 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4776 	if (r) {
4777 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4778 		goto fail;
4779 	}
4780 
4781 	/* wait for the GPU to finish processing the IB */
4782 	r = dma_fence_wait(f, false);
4783 	if (r) {
4784 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4785 		goto fail;
4786 	}
4787 
4788 fail:
4789 	amdgpu_ib_free(&ib, NULL);
4790 	dma_fence_put(f);
4791 
4792 	return r;
4793 }
4794 
4795 static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
4796 {
4797 	struct amdgpu_device *adev = ip_block->adev;
4798 
4799 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
4800 
4801 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
4802 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4803 		adev->gfx.num_gfx_rings = 0;
4804 	else
4805 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4806 	adev->gfx.xcc_mask = 1;
4807 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4808 					  AMDGPU_MAX_COMPUTE_RINGS);
4809 	gfx_v9_0_set_kiq_pm4_funcs(adev);
4810 	gfx_v9_0_set_ring_funcs(adev);
4811 	gfx_v9_0_set_irq_funcs(adev);
4812 	gfx_v9_0_set_gds_init(adev);
4813 	gfx_v9_0_set_rlc_funcs(adev);
4814 
4815 	/* init rlcg reg access ctrl */
4816 	gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
4817 
4818 	return gfx_v9_0_init_microcode(adev);
4819 }
4820 
4821 static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
4822 {
4823 	struct amdgpu_device *adev = ip_block->adev;
4824 	int r;
4825 
4826 	/*
4827 	 * Temp workaround to fix the issue that CP firmware fails to
4828 	 * update read pointer when CPDMA is writing clearing operation
4829 	 * to GDS in suspend/resume sequence on several cards. So just
4830 	 * limit this operation in cold boot sequence.
4831 	 */
4832 	if ((!adev->in_suspend) &&
4833 	    (adev->gds.gds_size)) {
4834 		r = gfx_v9_0_do_edc_gds_workarounds(adev);
4835 		if (r)
4836 			return r;
4837 	}
4838 
4839 	/* requires IBs so do in late init after IB pool is initialized */
4840 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4841 		r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4842 	else
4843 		r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4844 
4845 	if (r)
4846 		return r;
4847 
4848 	if (adev->gfx.ras &&
4849 	    adev->gfx.ras->enable_watchdog_timer)
4850 		adev->gfx.ras->enable_watchdog_timer(adev);
4851 
4852 	return 0;
4853 }
4854 
4855 static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
4856 {
4857 	struct amdgpu_device *adev = ip_block->adev;
4858 	int r;
4859 
4860 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4861 	if (r)
4862 		return r;
4863 
4864 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4865 	if (r)
4866 		return r;
4867 
4868 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
4869 	if (r)
4870 		return r;
4871 
4872 	r = gfx_v9_0_ecc_late_init(ip_block);
4873 	if (r)
4874 		return r;
4875 
4876 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4877 		gfx_v9_4_2_debug_trap_config_init(adev,
4878 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4879 	else
4880 		gfx_v9_0_debug_trap_config_init(adev,
4881 			adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4882 
4883 	return 0;
4884 }
4885 
4886 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4887 {
4888 	uint32_t rlc_setting;
4889 
4890 	/* if RLC is not enabled, do nothing */
4891 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4892 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4893 		return false;
4894 
4895 	return true;
4896 }
4897 
4898 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4899 {
4900 	uint32_t data;
4901 	unsigned i;
4902 
4903 	data = RLC_SAFE_MODE__CMD_MASK;
4904 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4905 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4906 
4907 	/* wait for RLC_SAFE_MODE */
4908 	for (i = 0; i < adev->usec_timeout; i++) {
4909 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4910 			break;
4911 		udelay(1);
4912 	}
4913 }
4914 
4915 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4916 {
4917 	uint32_t data;
4918 
4919 	data = RLC_SAFE_MODE__CMD_MASK;
4920 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4921 }
4922 
4923 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4924 						bool enable)
4925 {
4926 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4927 
4928 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4929 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4930 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4931 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4932 	} else {
4933 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4934 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4935 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4936 	}
4937 
4938 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4939 }
4940 
4941 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4942 						bool enable)
4943 {
4944 	/* TODO: double check if we need to perform under safe mode */
4945 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
4946 
4947 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4948 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4949 	else
4950 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4951 
4952 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4953 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4954 	else
4955 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4956 
4957 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
4958 }
4959 
4960 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4961 						      bool enable)
4962 {
4963 	uint32_t data, def;
4964 
4965 	/* It is disabled by HW by default */
4966 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4967 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4968 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4969 
4970 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
4971 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4972 
4973 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4974 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4975 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4976 
4977 		/* only for Vega10 & Raven1 */
4978 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4979 
4980 		if (def != data)
4981 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4982 
4983 		/* MGLS is a global flag to control all MGLS in GFX */
4984 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4985 			/* 2 - RLC memory Light sleep */
4986 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4987 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4988 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4989 				if (def != data)
4990 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4991 			}
4992 			/* 3 - CP memory Light sleep */
4993 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4994 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4995 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4996 				if (def != data)
4997 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4998 			}
4999 		}
5000 	} else {
5001 		/* 1 - MGCG_OVERRIDE */
5002 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5003 
5004 		if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
5005 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
5006 
5007 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5008 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5009 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
5010 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
5011 
5012 		if (def != data)
5013 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5014 
5015 		/* 2 - disable MGLS in RLC */
5016 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
5017 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5018 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5019 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
5020 		}
5021 
5022 		/* 3 - disable MGLS in CP */
5023 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
5024 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5025 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5026 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
5027 		}
5028 	}
5029 }
5030 
5031 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
5032 					   bool enable)
5033 {
5034 	uint32_t data, def;
5035 
5036 	if (!adev->gfx.num_gfx_rings)
5037 		return;
5038 
5039 	/* Enable 3D CGCG/CGLS */
5040 	if (enable) {
5041 		/* write cmd to clear cgcg/cgls ov */
5042 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5043 		/* unset CGCG override */
5044 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5045 		/* update CGCG and CGLS override bits */
5046 		if (def != data)
5047 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5048 
5049 		/* enable 3Dcgcg FSM(0x0000363f) */
5050 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5051 
5052 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5053 			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5054 				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5055 		else
5056 			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
5057 
5058 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5059 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5060 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5061 		if (def != data)
5062 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5063 
5064 		/* set IDLE_POLL_COUNT(0x00900100) */
5065 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5066 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5067 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5068 		if (def != data)
5069 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5070 	} else {
5071 		/* Disable CGCG/CGLS */
5072 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5073 		/* disable cgcg, cgls should be disabled */
5074 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
5075 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
5076 		/* disable cgcg and cgls in FSM */
5077 		if (def != data)
5078 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5079 	}
5080 }
5081 
5082 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5083 						      bool enable)
5084 {
5085 	uint32_t def, data;
5086 
5087 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5088 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5089 		/* unset CGCG override */
5090 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5091 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5092 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5093 		else
5094 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5095 		/* update CGCG and CGLS override bits */
5096 		if (def != data)
5097 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5098 
5099 		/* enable cgcg FSM(0x0000363F) */
5100 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5101 
5102 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
5103 			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5104 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5105 		else
5106 			data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5107 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5108 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5109 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5110 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5111 		if (def != data)
5112 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5113 
5114 		/* set IDLE_POLL_COUNT(0x00900100) */
5115 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5116 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5117 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5118 		if (def != data)
5119 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5120 	} else {
5121 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5122 		/* reset CGCG/CGLS bits */
5123 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5124 		/* disable cgcg and cgls in FSM */
5125 		if (def != data)
5126 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5127 	}
5128 }
5129 
5130 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5131 					    bool enable)
5132 {
5133 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5134 	if (enable) {
5135 		/* CGCG/CGLS should be enabled after MGCG/MGLS
5136 		 * ===  MGCG + MGLS ===
5137 		 */
5138 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5139 		/* ===  CGCG /CGLS for GFX 3D Only === */
5140 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5141 		/* ===  CGCG + CGLS === */
5142 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5143 	} else {
5144 		/* CGCG/CGLS should be disabled before MGCG/MGLS
5145 		 * ===  CGCG + CGLS ===
5146 		 */
5147 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5148 		/* ===  CGCG /CGLS for GFX 3D Only === */
5149 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5150 		/* ===  MGCG + MGLS === */
5151 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5152 	}
5153 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5154 	return 0;
5155 }
5156 
5157 static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
5158 					      unsigned int vmid)
5159 {
5160 	u32 reg, data;
5161 
5162 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5163 	if (amdgpu_sriov_is_pp_one_vf(adev))
5164 		data = RREG32_NO_KIQ(reg);
5165 	else
5166 		data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
5167 
5168 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5169 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5170 
5171 	if (amdgpu_sriov_is_pp_one_vf(adev))
5172 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5173 	else
5174 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5175 }
5176 
5177 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
5178 {
5179 	amdgpu_gfx_off_ctrl(adev, false);
5180 
5181 	gfx_v9_0_update_spm_vmid_internal(adev, vmid);
5182 
5183 	amdgpu_gfx_off_ctrl(adev, true);
5184 }
5185 
5186 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5187 					uint32_t offset,
5188 					struct soc15_reg_rlcg *entries, int arr_size)
5189 {
5190 	int i;
5191 	uint32_t reg;
5192 
5193 	if (!entries)
5194 		return false;
5195 
5196 	for (i = 0; i < arr_size; i++) {
5197 		const struct soc15_reg_rlcg *entry;
5198 
5199 		entry = &entries[i];
5200 		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5201 		if (offset == reg)
5202 			return true;
5203 	}
5204 
5205 	return false;
5206 }
5207 
5208 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5209 {
5210 	return gfx_v9_0_check_rlcg_range(adev, offset,
5211 					(void *)rlcg_access_gc_9_0,
5212 					ARRAY_SIZE(rlcg_access_gc_9_0));
5213 }
5214 
5215 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5216 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5217 	.set_safe_mode = gfx_v9_0_set_safe_mode,
5218 	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
5219 	.init = gfx_v9_0_rlc_init,
5220 	.get_csb_size = gfx_v9_0_get_csb_size,
5221 	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
5222 	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5223 	.resume = gfx_v9_0_rlc_resume,
5224 	.stop = gfx_v9_0_rlc_stop,
5225 	.reset = gfx_v9_0_rlc_reset,
5226 	.start = gfx_v9_0_rlc_start,
5227 	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
5228 	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5229 };
5230 
5231 static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5232 					  enum amd_powergating_state state)
5233 {
5234 	struct amdgpu_device *adev = ip_block->adev;
5235 	bool enable = (state == AMD_PG_STATE_GATE);
5236 
5237 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5238 	case IP_VERSION(9, 2, 2):
5239 	case IP_VERSION(9, 1, 0):
5240 	case IP_VERSION(9, 3, 0):
5241 		if (!enable)
5242 			amdgpu_gfx_off_ctrl_immediate(adev, false);
5243 
5244 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5245 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5246 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5247 		} else {
5248 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5249 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5250 		}
5251 
5252 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5253 			gfx_v9_0_enable_cp_power_gating(adev, true);
5254 		else
5255 			gfx_v9_0_enable_cp_power_gating(adev, false);
5256 
5257 		/* update gfx cgpg state */
5258 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5259 
5260 		/* update mgcg state */
5261 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5262 
5263 		if (enable)
5264 			amdgpu_gfx_off_ctrl_immediate(adev, true);
5265 		break;
5266 	case IP_VERSION(9, 2, 1):
5267 		amdgpu_gfx_off_ctrl_immediate(adev, enable);
5268 		break;
5269 	default:
5270 		break;
5271 	}
5272 
5273 	return 0;
5274 }
5275 
5276 static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5277 					  enum amd_clockgating_state state)
5278 {
5279 	struct amdgpu_device *adev = ip_block->adev;
5280 
5281 	if (amdgpu_sriov_vf(adev))
5282 		return 0;
5283 
5284 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5285 	case IP_VERSION(9, 0, 1):
5286 	case IP_VERSION(9, 2, 1):
5287 	case IP_VERSION(9, 4, 0):
5288 	case IP_VERSION(9, 2, 2):
5289 	case IP_VERSION(9, 1, 0):
5290 	case IP_VERSION(9, 4, 1):
5291 	case IP_VERSION(9, 3, 0):
5292 	case IP_VERSION(9, 4, 2):
5293 		gfx_v9_0_update_gfx_clock_gating(adev,
5294 						 state == AMD_CG_STATE_GATE);
5295 		break;
5296 	default:
5297 		break;
5298 	}
5299 	return 0;
5300 }
5301 
5302 static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5303 {
5304 	struct amdgpu_device *adev = ip_block->adev;
5305 	int data;
5306 
5307 	if (amdgpu_sriov_vf(adev))
5308 		*flags = 0;
5309 
5310 	/* AMD_CG_SUPPORT_GFX_MGCG */
5311 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5312 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5313 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5314 
5315 	/* AMD_CG_SUPPORT_GFX_CGCG */
5316 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5317 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5318 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5319 
5320 	/* AMD_CG_SUPPORT_GFX_CGLS */
5321 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5322 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5323 
5324 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5325 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5326 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5327 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5328 
5329 	/* AMD_CG_SUPPORT_GFX_CP_LS */
5330 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5331 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5332 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5333 
5334 	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {
5335 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5336 		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5337 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5338 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5339 
5340 		/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5341 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5342 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5343 	}
5344 }
5345 
5346 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5347 {
5348 	return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr*/
5349 }
5350 
5351 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5352 {
5353 	struct amdgpu_device *adev = ring->adev;
5354 	u64 wptr;
5355 
5356 	/* XXX check if swapping is necessary on BE */
5357 	if (ring->use_doorbell) {
5358 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5359 	} else {
5360 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5361 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5362 	}
5363 
5364 	return wptr;
5365 }
5366 
5367 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5368 {
5369 	struct amdgpu_device *adev = ring->adev;
5370 
5371 	if (ring->use_doorbell) {
5372 		/* XXX check if swapping is necessary on BE */
5373 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5374 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5375 	} else {
5376 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5377 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5378 	}
5379 }
5380 
5381 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5382 {
5383 	struct amdgpu_device *adev = ring->adev;
5384 	u32 ref_and_mask, reg_mem_engine;
5385 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5386 
5387 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5388 		switch (ring->me) {
5389 		case 1:
5390 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5391 			break;
5392 		case 2:
5393 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5394 			break;
5395 		default:
5396 			return;
5397 		}
5398 		reg_mem_engine = 0;
5399 	} else {
5400 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5401 		reg_mem_engine = 1; /* pfp */
5402 	}
5403 
5404 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5405 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5406 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5407 			      ref_and_mask, ref_and_mask, 0x20);
5408 }
5409 
5410 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5411 					struct amdgpu_job *job,
5412 					struct amdgpu_ib *ib,
5413 					uint32_t flags)
5414 {
5415 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5416 	u32 header, control = 0;
5417 
5418 	if (ib->flags & AMDGPU_IB_FLAG_CE)
5419 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5420 	else
5421 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5422 
5423 	control |= ib->length_dw | (vmid << 24);
5424 
5425 	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
5426 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5427 
5428 		if (flags & AMDGPU_IB_PREEMPTED)
5429 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5430 
5431 		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5432 			gfx_v9_0_ring_emit_de_meta(ring,
5433 						   (!amdgpu_sriov_vf(ring->adev) &&
5434 						   flags & AMDGPU_IB_PREEMPTED) ?
5435 						   true : false,
5436 						   job->gds_size > 0 && job->gds_base != 0);
5437 	}
5438 
5439 	amdgpu_ring_write(ring, header);
5440 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5441 	amdgpu_ring_write(ring,
5442 #ifdef __BIG_ENDIAN
5443 		(2 << 0) |
5444 #endif
5445 		lower_32_bits(ib->gpu_addr));
5446 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5447 	amdgpu_ring_ib_on_emit_cntl(ring);
5448 	amdgpu_ring_write(ring, control);
5449 }
5450 
5451 static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
5452 				     unsigned offset)
5453 {
5454 	u32 control = ring->ring[offset];
5455 
5456 	control |= INDIRECT_BUFFER_PRE_RESUME(1);
5457 	ring->ring[offset] = control;
5458 }
5459 
5460 static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
5461 					unsigned offset)
5462 {
5463 	struct amdgpu_device *adev = ring->adev;
5464 	void *ce_payload_cpu_addr;
5465 	uint64_t payload_offset, payload_size;
5466 
5467 	payload_size = sizeof(struct v9_ce_ib_state);
5468 
5469 	if (ring->is_mes_queue) {
5470 		payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5471 					  gfx[0].gfx_meta_data) +
5472 			offsetof(struct v9_gfx_meta_data, ce_payload);
5473 		ce_payload_cpu_addr =
5474 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
5475 	} else {
5476 		payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5477 		ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5478 	}
5479 
5480 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5481 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
5482 	} else {
5483 		memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
5484 		       (ring->buf_mask + 1 - offset) << 2);
5485 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5486 		memcpy((void *)&ring->ring[0],
5487 		       ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5488 		       payload_size);
5489 	}
5490 }
5491 
5492 static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
5493 					unsigned offset)
5494 {
5495 	struct amdgpu_device *adev = ring->adev;
5496 	void *de_payload_cpu_addr;
5497 	uint64_t payload_offset, payload_size;
5498 
5499 	payload_size = sizeof(struct v9_de_ib_state);
5500 
5501 	if (ring->is_mes_queue) {
5502 		payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5503 					  gfx[0].gfx_meta_data) +
5504 			offsetof(struct v9_gfx_meta_data, de_payload);
5505 		de_payload_cpu_addr =
5506 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
5507 	} else {
5508 		payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
5509 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5510 	}
5511 
5512 	((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
5513 		IB_COMPLETION_STATUS_PREEMPTED;
5514 
5515 	if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5516 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
5517 	} else {
5518 		memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
5519 		       (ring->buf_mask + 1 - offset) << 2);
5520 		payload_size -= (ring->buf_mask + 1 - offset) << 2;
5521 		memcpy((void *)&ring->ring[0],
5522 		       de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5523 		       payload_size);
5524 	}
5525 }
5526 
5527 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5528 					  struct amdgpu_job *job,
5529 					  struct amdgpu_ib *ib,
5530 					  uint32_t flags)
5531 {
5532 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5533 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5534 
5535 	/* Currently, there is a high possibility to get wave ID mismatch
5536 	 * between ME and GDS, leading to a hw deadlock, because ME generates
5537 	 * different wave IDs than the GDS expects. This situation happens
5538 	 * randomly when at least 5 compute pipes use GDS ordered append.
5539 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5540 	 * Those are probably bugs somewhere else in the kernel driver.
5541 	 *
5542 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5543 	 * GDS to 0 for this ring (me/pipe).
5544 	 */
5545 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5546 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5547 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5548 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5549 	}
5550 
5551 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5552 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5553 	amdgpu_ring_write(ring,
5554 #ifdef __BIG_ENDIAN
5555 				(2 << 0) |
5556 #endif
5557 				lower_32_bits(ib->gpu_addr));
5558 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5559 	amdgpu_ring_write(ring, control);
5560 }
5561 
5562 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5563 				     u64 seq, unsigned flags)
5564 {
5565 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5566 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5567 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5568 	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
5569 	uint32_t dw2 = 0;
5570 
5571 	/* RELEASE_MEM - flush caches, send int */
5572 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5573 
5574 	if (writeback) {
5575 		dw2 = EOP_TC_NC_ACTION_EN;
5576 	} else {
5577 		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
5578 				EOP_TC_MD_ACTION_EN;
5579 	}
5580 	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5581 				EVENT_INDEX(5);
5582 	if (exec)
5583 		dw2 |= EOP_EXEC;
5584 
5585 	amdgpu_ring_write(ring, dw2);
5586 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5587 
5588 	/*
5589 	 * the address should be Qword aligned if 64bit write, Dword
5590 	 * aligned if only send 32bit data low (discard data high)
5591 	 */
5592 	if (write64bit)
5593 		BUG_ON(addr & 0x7);
5594 	else
5595 		BUG_ON(addr & 0x3);
5596 	amdgpu_ring_write(ring, lower_32_bits(addr));
5597 	amdgpu_ring_write(ring, upper_32_bits(addr));
5598 	amdgpu_ring_write(ring, lower_32_bits(seq));
5599 	amdgpu_ring_write(ring, upper_32_bits(seq));
5600 	amdgpu_ring_write(ring, 0);
5601 }
5602 
5603 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5604 {
5605 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5606 	uint32_t seq = ring->fence_drv.sync_seq;
5607 	uint64_t addr = ring->fence_drv.gpu_addr;
5608 
5609 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5610 			      lower_32_bits(addr), upper_32_bits(addr),
5611 			      seq, 0xffffffff, 4);
5612 }
5613 
5614 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5615 					unsigned vmid, uint64_t pd_addr)
5616 {
5617 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5618 
5619 	/* compute doesn't have PFP */
5620 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5621 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5622 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5623 		amdgpu_ring_write(ring, 0x0);
5624 	}
5625 }
5626 
5627 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5628 {
5629 	return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
5630 }
5631 
5632 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5633 {
5634 	u64 wptr;
5635 
5636 	/* XXX check if swapping is necessary on BE */
5637 	if (ring->use_doorbell)
5638 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5639 	else
5640 		BUG();
5641 	return wptr;
5642 }
5643 
5644 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5645 {
5646 	struct amdgpu_device *adev = ring->adev;
5647 
5648 	/* XXX check if swapping is necessary on BE */
5649 	if (ring->use_doorbell) {
5650 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5651 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5652 	} else{
5653 		BUG(); /* only DOORBELL method supported on gfx9 now */
5654 	}
5655 }
5656 
5657 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5658 					 u64 seq, unsigned int flags)
5659 {
5660 	struct amdgpu_device *adev = ring->adev;
5661 
5662 	/* we only allocate 32bit for each seq wb address */
5663 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5664 
5665 	/* write fence seq to the "addr" */
5666 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5667 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5668 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5669 	amdgpu_ring_write(ring, lower_32_bits(addr));
5670 	amdgpu_ring_write(ring, upper_32_bits(addr));
5671 	amdgpu_ring_write(ring, lower_32_bits(seq));
5672 
5673 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5674 		/* set register to trigger INT */
5675 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5676 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5677 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5678 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5679 		amdgpu_ring_write(ring, 0);
5680 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5681 	}
5682 }
5683 
5684 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5685 {
5686 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5687 	amdgpu_ring_write(ring, 0);
5688 }
5689 
5690 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
5691 {
5692 	struct amdgpu_device *adev = ring->adev;
5693 	struct v9_ce_ib_state ce_payload = {0};
5694 	uint64_t offset, ce_payload_gpu_addr;
5695 	void *ce_payload_cpu_addr;
5696 	int cnt;
5697 
5698 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5699 
5700 	if (ring->is_mes_queue) {
5701 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5702 				  gfx[0].gfx_meta_data) +
5703 			offsetof(struct v9_gfx_meta_data, ce_payload);
5704 		ce_payload_gpu_addr =
5705 			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5706 		ce_payload_cpu_addr =
5707 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5708 	} else {
5709 		offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5710 		ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5711 		ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5712 	}
5713 
5714 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5715 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5716 				 WRITE_DATA_DST_SEL(8) |
5717 				 WR_CONFIRM) |
5718 				 WRITE_DATA_CACHE_POLICY(0));
5719 	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
5720 	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
5721 
5722 	amdgpu_ring_ib_on_emit_ce(ring);
5723 
5724 	if (resume)
5725 		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
5726 					   sizeof(ce_payload) >> 2);
5727 	else
5728 		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
5729 					   sizeof(ce_payload) >> 2);
5730 }
5731 
5732 static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
5733 {
5734 	int i, r = 0;
5735 	struct amdgpu_device *adev = ring->adev;
5736 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5737 	struct amdgpu_ring *kiq_ring = &kiq->ring;
5738 	unsigned long flags;
5739 
5740 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5741 		return -EINVAL;
5742 
5743 	spin_lock_irqsave(&kiq->ring_lock, flags);
5744 
5745 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5746 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
5747 		return -ENOMEM;
5748 	}
5749 
5750 	/* assert preemption condition */
5751 	amdgpu_ring_set_preempt_cond_exec(ring, false);
5752 
5753 	ring->trail_seq += 1;
5754 	amdgpu_ring_alloc(ring, 13);
5755 	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
5756 				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
5757 
5758 	/* assert IB preemption, emit the trailing fence */
5759 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5760 				   ring->trail_fence_gpu_addr,
5761 				   ring->trail_seq);
5762 
5763 	amdgpu_ring_commit(kiq_ring);
5764 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
5765 
5766 	/* poll the trailing fence */
5767 	for (i = 0; i < adev->usec_timeout; i++) {
5768 		if (ring->trail_seq ==
5769 			le32_to_cpu(*ring->trail_fence_cpu_addr))
5770 			break;
5771 		udelay(1);
5772 	}
5773 
5774 	if (i >= adev->usec_timeout) {
5775 		r = -EINVAL;
5776 		DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
5777 	}
5778 
5779 	/*reset the CP_VMID_PREEMPT after trailing fence*/
5780 	amdgpu_ring_emit_wreg(ring,
5781 			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
5782 			      0x0);
5783 	amdgpu_ring_commit(ring);
5784 
5785 	/* deassert preemption condition */
5786 	amdgpu_ring_set_preempt_cond_exec(ring, true);
5787 	return r;
5788 }
5789 
5790 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
5791 {
5792 	struct amdgpu_device *adev = ring->adev;
5793 	struct v9_de_ib_state de_payload = {0};
5794 	uint64_t offset, gds_addr, de_payload_gpu_addr;
5795 	void *de_payload_cpu_addr;
5796 	int cnt;
5797 
5798 	if (ring->is_mes_queue) {
5799 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5800 				  gfx[0].gfx_meta_data) +
5801 			offsetof(struct v9_gfx_meta_data, de_payload);
5802 		de_payload_gpu_addr =
5803 			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5804 		de_payload_cpu_addr =
5805 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5806 
5807 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5808 				  gfx[0].gds_backup) +
5809 			offsetof(struct v9_gfx_meta_data, de_payload);
5810 		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5811 	} else {
5812 		offset = offsetof(struct v9_gfx_meta_data, de_payload);
5813 		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5814 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5815 
5816 		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5817 				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5818 				 PAGE_SIZE);
5819 	}
5820 
5821 	if (usegds) {
5822 		de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5823 		de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5824 	}
5825 
5826 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5827 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5828 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5829 				 WRITE_DATA_DST_SEL(8) |
5830 				 WR_CONFIRM) |
5831 				 WRITE_DATA_CACHE_POLICY(0));
5832 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5833 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5834 
5835 	amdgpu_ring_ib_on_emit_de(ring);
5836 	if (resume)
5837 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5838 					   sizeof(de_payload) >> 2);
5839 	else
5840 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5841 					   sizeof(de_payload) >> 2);
5842 }
5843 
5844 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5845 				   bool secure)
5846 {
5847 	uint32_t v = secure ? FRAME_TMZ : 0;
5848 
5849 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5850 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5851 }
5852 
5853 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5854 {
5855 	uint32_t dw2 = 0;
5856 
5857 	gfx_v9_0_ring_emit_ce_meta(ring,
5858 				   (!amdgpu_sriov_vf(ring->adev) &&
5859 				   flags & AMDGPU_IB_PREEMPTED) ? true : false);
5860 
5861 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5862 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5863 		/* set load_global_config & load_global_uconfig */
5864 		dw2 |= 0x8001;
5865 		/* set load_cs_sh_regs */
5866 		dw2 |= 0x01000000;
5867 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5868 		dw2 |= 0x10002;
5869 
5870 		/* set load_ce_ram if preamble presented */
5871 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5872 			dw2 |= 0x10000000;
5873 	} else {
5874 		/* still load_ce_ram if this is the first time preamble presented
5875 		 * although there is no context switch happens.
5876 		 */
5877 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5878 			dw2 |= 0x10000000;
5879 	}
5880 
5881 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5882 	amdgpu_ring_write(ring, dw2);
5883 	amdgpu_ring_write(ring, 0);
5884 }
5885 
5886 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
5887 						  uint64_t addr)
5888 {
5889 	unsigned ret;
5890 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5891 	amdgpu_ring_write(ring, lower_32_bits(addr));
5892 	amdgpu_ring_write(ring, upper_32_bits(addr));
5893 	/* discard following DWs if *cond_exec_gpu_addr==0 */
5894 	amdgpu_ring_write(ring, 0);
5895 	ret = ring->wptr & ring->buf_mask;
5896 	/* patch dummy value later */
5897 	amdgpu_ring_write(ring, 0);
5898 	return ret;
5899 }
5900 
5901 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5902 				    uint32_t reg_val_offs)
5903 {
5904 	struct amdgpu_device *adev = ring->adev;
5905 
5906 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5907 	amdgpu_ring_write(ring, 0 |	/* src: register*/
5908 				(5 << 8) |	/* dst: memory */
5909 				(1 << 20));	/* write confirm */
5910 	amdgpu_ring_write(ring, reg);
5911 	amdgpu_ring_write(ring, 0);
5912 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5913 				reg_val_offs * 4));
5914 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5915 				reg_val_offs * 4));
5916 }
5917 
5918 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5919 				    uint32_t val)
5920 {
5921 	uint32_t cmd = 0;
5922 
5923 	switch (ring->funcs->type) {
5924 	case AMDGPU_RING_TYPE_GFX:
5925 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5926 		break;
5927 	case AMDGPU_RING_TYPE_KIQ:
5928 		cmd = (1 << 16); /* no inc addr */
5929 		break;
5930 	default:
5931 		cmd = WR_CONFIRM;
5932 		break;
5933 	}
5934 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5935 	amdgpu_ring_write(ring, cmd);
5936 	amdgpu_ring_write(ring, reg);
5937 	amdgpu_ring_write(ring, 0);
5938 	amdgpu_ring_write(ring, val);
5939 }
5940 
5941 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5942 					uint32_t val, uint32_t mask)
5943 {
5944 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5945 }
5946 
5947 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5948 						  uint32_t reg0, uint32_t reg1,
5949 						  uint32_t ref, uint32_t mask)
5950 {
5951 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5952 	struct amdgpu_device *adev = ring->adev;
5953 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5954 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5955 
5956 	if (fw_version_ok)
5957 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5958 				      ref, mask, 0x20);
5959 	else
5960 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5961 							   ref, mask);
5962 }
5963 
5964 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5965 {
5966 	struct amdgpu_device *adev = ring->adev;
5967 	uint32_t value = 0;
5968 
5969 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5970 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5971 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5972 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5973 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5974 	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5975 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5976 }
5977 
5978 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5979 						 enum amdgpu_interrupt_state state)
5980 {
5981 	switch (state) {
5982 	case AMDGPU_IRQ_STATE_DISABLE:
5983 	case AMDGPU_IRQ_STATE_ENABLE:
5984 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5985 			       TIME_STAMP_INT_ENABLE,
5986 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5987 		break;
5988 	default:
5989 		break;
5990 	}
5991 }
5992 
5993 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5994 						     int me, int pipe,
5995 						     enum amdgpu_interrupt_state state)
5996 {
5997 	u32 mec_int_cntl, mec_int_cntl_reg;
5998 
5999 	/*
6000 	 * amdgpu controls only the first MEC. That's why this function only
6001 	 * handles the setting of interrupts for this specific MEC. All other
6002 	 * pipes' interrupts are set by amdkfd.
6003 	 */
6004 
6005 	if (me == 1) {
6006 		switch (pipe) {
6007 		case 0:
6008 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
6009 			break;
6010 		case 1:
6011 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
6012 			break;
6013 		case 2:
6014 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
6015 			break;
6016 		case 3:
6017 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
6018 			break;
6019 		default:
6020 			DRM_DEBUG("invalid pipe %d\n", pipe);
6021 			return;
6022 		}
6023 	} else {
6024 		DRM_DEBUG("invalid me %d\n", me);
6025 		return;
6026 	}
6027 
6028 	switch (state) {
6029 	case AMDGPU_IRQ_STATE_DISABLE:
6030 		mec_int_cntl = RREG32_SOC15_IP(GC,mec_int_cntl_reg);
6031 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6032 					     TIME_STAMP_INT_ENABLE, 0);
6033 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6034 		break;
6035 	case AMDGPU_IRQ_STATE_ENABLE:
6036 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6037 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6038 					     TIME_STAMP_INT_ENABLE, 1);
6039 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6040 		break;
6041 	default:
6042 		break;
6043 	}
6044 }
6045 
6046 static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
6047 				     int me, int pipe)
6048 {
6049 	/*
6050 	 * amdgpu controls only the first MEC. That's why this function only
6051 	 * handles the setting of interrupts for this specific MEC. All other
6052 	 * pipes' interrupts are set by amdkfd.
6053 	 */
6054 	if (me != 1)
6055 		return 0;
6056 
6057 	switch (pipe) {
6058 	case 0:
6059 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
6060 	case 1:
6061 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
6062 	case 2:
6063 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
6064 	case 3:
6065 		return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
6066 	default:
6067 		return 0;
6068 	}
6069 }
6070 
6071 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6072 					     struct amdgpu_irq_src *source,
6073 					     unsigned type,
6074 					     enum amdgpu_interrupt_state state)
6075 {
6076 	u32 cp_int_cntl_reg, cp_int_cntl;
6077 	int i, j;
6078 
6079 	switch (state) {
6080 	case AMDGPU_IRQ_STATE_DISABLE:
6081 	case AMDGPU_IRQ_STATE_ENABLE:
6082 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6083 			       PRIV_REG_INT_ENABLE,
6084 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6085 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6086 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6087 				/* MECs start at 1 */
6088 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6089 
6090 				if (cp_int_cntl_reg) {
6091 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6092 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6093 								    PRIV_REG_INT_ENABLE,
6094 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6095 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6096 				}
6097 			}
6098 		}
6099 		break;
6100 	default:
6101 		break;
6102 	}
6103 
6104 	return 0;
6105 }
6106 
6107 static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6108 					   struct amdgpu_irq_src *source,
6109 					   unsigned type,
6110 					   enum amdgpu_interrupt_state state)
6111 {
6112 	u32 cp_int_cntl_reg, cp_int_cntl;
6113 	int i, j;
6114 
6115 	switch (state) {
6116 	case AMDGPU_IRQ_STATE_DISABLE:
6117 	case AMDGPU_IRQ_STATE_ENABLE:
6118 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6119 			       OPCODE_ERROR_INT_ENABLE,
6120 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6121 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6122 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6123 				/* MECs start at 1 */
6124 				cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6125 
6126 				if (cp_int_cntl_reg) {
6127 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6128 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6129 								    OPCODE_ERROR_INT_ENABLE,
6130 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6131 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6132 				}
6133 			}
6134 		}
6135 		break;
6136 	default:
6137 		break;
6138 	}
6139 
6140 	return 0;
6141 }
6142 
6143 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6144 					      struct amdgpu_irq_src *source,
6145 					      unsigned type,
6146 					      enum amdgpu_interrupt_state state)
6147 {
6148 	switch (state) {
6149 	case AMDGPU_IRQ_STATE_DISABLE:
6150 	case AMDGPU_IRQ_STATE_ENABLE:
6151 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6152 			       PRIV_INSTR_INT_ENABLE,
6153 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6154 		break;
6155 	default:
6156 		break;
6157 	}
6158 
6159 	return 0;
6160 }
6161 
6162 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
6163 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6164 			CP_ECC_ERROR_INT_ENABLE, 1)
6165 
6166 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
6167 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6168 			CP_ECC_ERROR_INT_ENABLE, 0)
6169 
6170 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
6171 					      struct amdgpu_irq_src *source,
6172 					      unsigned type,
6173 					      enum amdgpu_interrupt_state state)
6174 {
6175 	switch (state) {
6176 	case AMDGPU_IRQ_STATE_DISABLE:
6177 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6178 				CP_ECC_ERROR_INT_ENABLE, 0);
6179 		DISABLE_ECC_ON_ME_PIPE(1, 0);
6180 		DISABLE_ECC_ON_ME_PIPE(1, 1);
6181 		DISABLE_ECC_ON_ME_PIPE(1, 2);
6182 		DISABLE_ECC_ON_ME_PIPE(1, 3);
6183 		break;
6184 
6185 	case AMDGPU_IRQ_STATE_ENABLE:
6186 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6187 				CP_ECC_ERROR_INT_ENABLE, 1);
6188 		ENABLE_ECC_ON_ME_PIPE(1, 0);
6189 		ENABLE_ECC_ON_ME_PIPE(1, 1);
6190 		ENABLE_ECC_ON_ME_PIPE(1, 2);
6191 		ENABLE_ECC_ON_ME_PIPE(1, 3);
6192 		break;
6193 	default:
6194 		break;
6195 	}
6196 
6197 	return 0;
6198 }
6199 
6200 
6201 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6202 					    struct amdgpu_irq_src *src,
6203 					    unsigned type,
6204 					    enum amdgpu_interrupt_state state)
6205 {
6206 	switch (type) {
6207 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6208 		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
6209 		break;
6210 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6211 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6212 		break;
6213 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6214 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6215 		break;
6216 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6217 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6218 		break;
6219 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6220 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6221 		break;
6222 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6223 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6224 		break;
6225 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6226 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6227 		break;
6228 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6229 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6230 		break;
6231 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6232 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6233 		break;
6234 	default:
6235 		break;
6236 	}
6237 	return 0;
6238 }
6239 
6240 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
6241 			    struct amdgpu_irq_src *source,
6242 			    struct amdgpu_iv_entry *entry)
6243 {
6244 	int i;
6245 	u8 me_id, pipe_id, queue_id;
6246 	struct amdgpu_ring *ring;
6247 
6248 	DRM_DEBUG("IH: CP EOP\n");
6249 	me_id = (entry->ring_id & 0x0c) >> 2;
6250 	pipe_id = (entry->ring_id & 0x03) >> 0;
6251 	queue_id = (entry->ring_id & 0x70) >> 4;
6252 
6253 	switch (me_id) {
6254 	case 0:
6255 		if (adev->gfx.num_gfx_rings) {
6256 			if (!adev->gfx.mcbp) {
6257 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6258 			} else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
6259 				/* Fence signals are handled on the software rings*/
6260 				for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
6261 					amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
6262 			}
6263 		}
6264 		break;
6265 	case 1:
6266 	case 2:
6267 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6268 			ring = &adev->gfx.compute_ring[i];
6269 			/* Per-queue interrupt is supported for MEC starting from VI.
6270 			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
6271 			  */
6272 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6273 				amdgpu_fence_process(ring);
6274 		}
6275 		break;
6276 	}
6277 	return 0;
6278 }
6279 
6280 static void gfx_v9_0_fault(struct amdgpu_device *adev,
6281 			   struct amdgpu_iv_entry *entry)
6282 {
6283 	u8 me_id, pipe_id, queue_id;
6284 	struct amdgpu_ring *ring;
6285 	int i;
6286 
6287 	me_id = (entry->ring_id & 0x0c) >> 2;
6288 	pipe_id = (entry->ring_id & 0x03) >> 0;
6289 	queue_id = (entry->ring_id & 0x70) >> 4;
6290 
6291 	switch (me_id) {
6292 	case 0:
6293 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6294 		break;
6295 	case 1:
6296 	case 2:
6297 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6298 			ring = &adev->gfx.compute_ring[i];
6299 			if (ring->me == me_id && ring->pipe == pipe_id &&
6300 			    ring->queue == queue_id)
6301 				drm_sched_fault(&ring->sched);
6302 		}
6303 		break;
6304 	}
6305 }
6306 
6307 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
6308 				 struct amdgpu_irq_src *source,
6309 				 struct amdgpu_iv_entry *entry)
6310 {
6311 	DRM_ERROR("Illegal register access in command stream\n");
6312 	gfx_v9_0_fault(adev, entry);
6313 	return 0;
6314 }
6315 
6316 static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
6317 			       struct amdgpu_irq_src *source,
6318 			       struct amdgpu_iv_entry *entry)
6319 {
6320 	DRM_ERROR("Illegal opcode in command stream\n");
6321 	gfx_v9_0_fault(adev, entry);
6322 	return 0;
6323 }
6324 
6325 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
6326 				  struct amdgpu_irq_src *source,
6327 				  struct amdgpu_iv_entry *entry)
6328 {
6329 	DRM_ERROR("Illegal instruction in command stream\n");
6330 	gfx_v9_0_fault(adev, entry);
6331 	return 0;
6332 }
6333 
6334 
6335 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
6336 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
6337 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
6338 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
6339 	},
6340 	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
6341 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
6342 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
6343 	},
6344 	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6345 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
6346 	  0, 0
6347 	},
6348 	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6349 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
6350 	  0, 0
6351 	},
6352 	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
6353 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
6354 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
6355 	},
6356 	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6357 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
6358 	  0, 0
6359 	},
6360 	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6361 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
6362 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
6363 	},
6364 	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
6365 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
6366 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
6367 	},
6368 	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
6369 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
6370 	  0, 0
6371 	},
6372 	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
6373 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
6374 	  0, 0
6375 	},
6376 	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
6377 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
6378 	  0, 0
6379 	},
6380 	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6381 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
6382 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
6383 	},
6384 	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6385 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
6386 	  0, 0
6387 	},
6388 	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6389 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6390 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6391 	},
6392 	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6393 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6394 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6395 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6396 	},
6397 	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6398 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6399 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6400 	  0, 0
6401 	},
6402 	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6403 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6404 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6405 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6406 	},
6407 	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6408 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6409 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6410 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6411 	},
6412 	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6413 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6414 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6415 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6416 	},
6417 	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6418 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6419 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6420 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6421 	},
6422 	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6423 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6424 	  0, 0
6425 	},
6426 	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6427 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6428 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6429 	},
6430 	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6431 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6432 	  0, 0
6433 	},
6434 	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6435 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6436 	  0, 0
6437 	},
6438 	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6439 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6440 	  0, 0
6441 	},
6442 	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6443 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6444 	  0, 0
6445 	},
6446 	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6447 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6448 	  0, 0
6449 	},
6450 	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6451 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6452 	  0, 0
6453 	},
6454 	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6455 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6456 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6457 	},
6458 	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6459 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6460 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6461 	},
6462 	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6463 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6464 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6465 	},
6466 	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6467 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6468 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6469 	},
6470 	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6471 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6472 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6473 	},
6474 	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6475 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6476 	  0, 0
6477 	},
6478 	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6479 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6480 	  0, 0
6481 	},
6482 	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6483 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6484 	  0, 0
6485 	},
6486 	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6487 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6488 	  0, 0
6489 	},
6490 	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6491 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6492 	  0, 0
6493 	},
6494 	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6495 	  SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6496 	  0, 0
6497 	},
6498 	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6499 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6500 	  0, 0
6501 	},
6502 	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6503 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6504 	  0, 0
6505 	},
6506 	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6507 	  SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6508 	  0, 0
6509 	},
6510 	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6511 	  SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6512 	  0, 0
6513 	},
6514 	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6515 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6516 	  0, 0
6517 	},
6518 	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6519 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6520 	  0, 0
6521 	},
6522 	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6523 	  SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6524 	  0, 0
6525 	},
6526 	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6527 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6528 	  0, 0
6529 	},
6530 	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6531 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6532 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6533 	},
6534 	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6535 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6536 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6537 	},
6538 	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6539 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6540 	  0, 0
6541 	},
6542 	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6543 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6544 	  0, 0
6545 	},
6546 	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6547 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6548 	  0, 0
6549 	},
6550 	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6551 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6552 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6553 	},
6554 	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6555 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6556 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6557 	},
6558 	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6559 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6560 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6561 	},
6562 	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6563 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6564 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6565 	},
6566 	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6567 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6568 	  0, 0
6569 	},
6570 	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6571 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6572 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6573 	},
6574 	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6575 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6576 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6577 	},
6578 	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6579 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6580 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6581 	},
6582 	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6583 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6584 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6585 	},
6586 	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6587 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6588 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6589 	},
6590 	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6591 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6592 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6593 	},
6594 	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6595 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6596 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6597 	},
6598 	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6599 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6600 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6601 	},
6602 	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6603 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6604 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6605 	},
6606 	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6607 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6608 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6609 	},
6610 	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6611 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6612 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6613 	},
6614 	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6615 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6616 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6617 	},
6618 	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6619 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6620 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6621 	},
6622 	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6623 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6624 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6625 	},
6626 	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6627 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6628 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6629 	},
6630 	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6631 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6632 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6633 	},
6634 	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6635 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6636 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6637 	},
6638 	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6639 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6640 	  0, 0
6641 	},
6642 	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6643 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6644 	  0, 0
6645 	},
6646 	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6647 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6648 	  0, 0
6649 	},
6650 	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6651 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6652 	  0, 0
6653 	},
6654 	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6655 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6656 	  0, 0
6657 	},
6658 	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6659 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6660 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6661 	},
6662 	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6663 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6664 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6665 	},
6666 	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6667 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6668 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6669 	},
6670 	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6671 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6672 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6673 	},
6674 	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6675 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6676 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6677 	},
6678 	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6679 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6680 	  0, 0
6681 	},
6682 	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6683 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6684 	  0, 0
6685 	},
6686 	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6687 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6688 	  0, 0
6689 	},
6690 	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6691 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6692 	  0, 0
6693 	},
6694 	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6695 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6696 	  0, 0
6697 	},
6698 	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6699 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6700 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6701 	},
6702 	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6703 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6704 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6705 	},
6706 	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6707 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6708 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6709 	},
6710 	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6711 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6712 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6713 	},
6714 	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6715 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6716 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6717 	},
6718 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6719 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6720 	  0, 0
6721 	},
6722 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6723 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6724 	  0, 0
6725 	},
6726 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6727 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6728 	  0, 0
6729 	},
6730 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6731 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6732 	  0, 0
6733 	},
6734 	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6735 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6736 	  0, 0
6737 	},
6738 	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6739 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6740 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6741 	},
6742 	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6743 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6744 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6745 	},
6746 	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6747 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6748 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6749 	},
6750 	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6751 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6752 	  0, 0
6753 	},
6754 	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6755 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6756 	  0, 0
6757 	},
6758 	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6759 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6760 	  0, 0
6761 	},
6762 	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6763 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6764 	  0, 0
6765 	},
6766 	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6767 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6768 	  0, 0
6769 	},
6770 	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6771 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6772 	  0, 0
6773 	}
6774 };
6775 
6776 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6777 				     void *inject_if, uint32_t instance_mask)
6778 {
6779 	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6780 	int ret;
6781 	struct ta_ras_trigger_error_input block_info = { 0 };
6782 
6783 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6784 		return -EINVAL;
6785 
6786 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6787 		return -EINVAL;
6788 
6789 	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6790 		return -EPERM;
6791 
6792 	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6793 	      info->head.type)) {
6794 		DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
6795 			ras_gfx_subblocks[info->head.sub_block_index].name,
6796 			info->head.type);
6797 		return -EPERM;
6798 	}
6799 
6800 	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6801 	      info->head.type)) {
6802 		DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
6803 			ras_gfx_subblocks[info->head.sub_block_index].name,
6804 			info->head.type);
6805 		return -EPERM;
6806 	}
6807 
6808 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6809 	block_info.sub_block_index =
6810 		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6811 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6812 	block_info.address = info->address;
6813 	block_info.value = info->value;
6814 
6815 	mutex_lock(&adev->grbm_idx_mutex);
6816 	ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
6817 	mutex_unlock(&adev->grbm_idx_mutex);
6818 
6819 	return ret;
6820 }
6821 
6822 static const char * const vml2_mems[] = {
6823 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6824 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6825 	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
6826 	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
6827 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6828 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6829 	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
6830 	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
6831 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6832 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6833 	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
6834 	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
6835 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6836 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6837 	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
6838 	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
6839 };
6840 
6841 static const char * const vml2_walker_mems[] = {
6842 	"UTC_VML2_CACHE_PDE0_MEM0",
6843 	"UTC_VML2_CACHE_PDE0_MEM1",
6844 	"UTC_VML2_CACHE_PDE1_MEM0",
6845 	"UTC_VML2_CACHE_PDE1_MEM1",
6846 	"UTC_VML2_CACHE_PDE2_MEM0",
6847 	"UTC_VML2_CACHE_PDE2_MEM1",
6848 	"UTC_VML2_RDIF_LOG_FIFO",
6849 };
6850 
6851 static const char * const atc_l2_cache_2m_mems[] = {
6852 	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6853 	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6854 	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6855 	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6856 };
6857 
6858 static const char *atc_l2_cache_4k_mems[] = {
6859 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6860 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6861 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6862 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6863 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6864 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6865 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6866 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6867 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6868 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6869 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6870 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6871 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6872 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6873 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6874 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6875 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6876 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6877 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6878 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6879 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6880 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6881 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6882 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6883 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6884 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6885 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6886 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6887 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6888 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6889 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6890 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6891 };
6892 
6893 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6894 					 struct ras_err_data *err_data)
6895 {
6896 	uint32_t i, data;
6897 	uint32_t sec_count, ded_count;
6898 
6899 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6900 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6901 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6902 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6903 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6904 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6905 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6906 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6907 
6908 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6909 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6910 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6911 
6912 		sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6913 		if (sec_count) {
6914 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6915 				"SEC %d\n", i, vml2_mems[i], sec_count);
6916 			err_data->ce_count += sec_count;
6917 		}
6918 
6919 		ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6920 		if (ded_count) {
6921 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6922 				"DED %d\n", i, vml2_mems[i], ded_count);
6923 			err_data->ue_count += ded_count;
6924 		}
6925 	}
6926 
6927 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6928 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6929 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6930 
6931 		sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6932 						SEC_COUNT);
6933 		if (sec_count) {
6934 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6935 				"SEC %d\n", i, vml2_walker_mems[i], sec_count);
6936 			err_data->ce_count += sec_count;
6937 		}
6938 
6939 		ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6940 						DED_COUNT);
6941 		if (ded_count) {
6942 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6943 				"DED %d\n", i, vml2_walker_mems[i], ded_count);
6944 			err_data->ue_count += ded_count;
6945 		}
6946 	}
6947 
6948 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6949 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6950 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6951 
6952 		sec_count = (data & 0x00006000L) >> 0xd;
6953 		if (sec_count) {
6954 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6955 				"SEC %d\n", i, atc_l2_cache_2m_mems[i],
6956 				sec_count);
6957 			err_data->ce_count += sec_count;
6958 		}
6959 	}
6960 
6961 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6962 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6963 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6964 
6965 		sec_count = (data & 0x00006000L) >> 0xd;
6966 		if (sec_count) {
6967 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6968 				"SEC %d\n", i, atc_l2_cache_4k_mems[i],
6969 				sec_count);
6970 			err_data->ce_count += sec_count;
6971 		}
6972 
6973 		ded_count = (data & 0x00018000L) >> 0xf;
6974 		if (ded_count) {
6975 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6976 				"DED %d\n", i, atc_l2_cache_4k_mems[i],
6977 				ded_count);
6978 			err_data->ue_count += ded_count;
6979 		}
6980 	}
6981 
6982 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6983 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6984 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6985 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6986 
6987 	return 0;
6988 }
6989 
6990 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6991 	const struct soc15_reg_entry *reg,
6992 	uint32_t se_id, uint32_t inst_id, uint32_t value,
6993 	uint32_t *sec_count, uint32_t *ded_count)
6994 {
6995 	uint32_t i;
6996 	uint32_t sec_cnt, ded_cnt;
6997 
6998 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6999 		if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
7000 			gfx_v9_0_ras_fields[i].seg != reg->seg ||
7001 			gfx_v9_0_ras_fields[i].inst != reg->inst)
7002 			continue;
7003 
7004 		sec_cnt = (value &
7005 				gfx_v9_0_ras_fields[i].sec_count_mask) >>
7006 				gfx_v9_0_ras_fields[i].sec_count_shift;
7007 		if (sec_cnt) {
7008 			dev_info(adev->dev, "GFX SubBlock %s, "
7009 				"Instance[%d][%d], SEC %d\n",
7010 				gfx_v9_0_ras_fields[i].name,
7011 				se_id, inst_id,
7012 				sec_cnt);
7013 			*sec_count += sec_cnt;
7014 		}
7015 
7016 		ded_cnt = (value &
7017 				gfx_v9_0_ras_fields[i].ded_count_mask) >>
7018 				gfx_v9_0_ras_fields[i].ded_count_shift;
7019 		if (ded_cnt) {
7020 			dev_info(adev->dev, "GFX SubBlock %s, "
7021 				"Instance[%d][%d], DED %d\n",
7022 				gfx_v9_0_ras_fields[i].name,
7023 				se_id, inst_id,
7024 				ded_cnt);
7025 			*ded_count += ded_cnt;
7026 		}
7027 	}
7028 
7029 	return 0;
7030 }
7031 
7032 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
7033 {
7034 	int i, j, k;
7035 
7036 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7037 		return;
7038 
7039 	/* read back registers to clear the counters */
7040 	mutex_lock(&adev->grbm_idx_mutex);
7041 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7042 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7043 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7044 				amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
7045 				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7046 			}
7047 		}
7048 	}
7049 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
7050 	mutex_unlock(&adev->grbm_idx_mutex);
7051 
7052 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7053 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
7054 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7055 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
7056 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7057 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
7058 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7059 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
7060 
7061 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
7062 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
7063 		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
7064 	}
7065 
7066 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
7067 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
7068 		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
7069 	}
7070 
7071 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
7072 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
7073 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
7074 	}
7075 
7076 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
7077 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
7078 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
7079 	}
7080 
7081 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7082 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7083 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7084 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7085 }
7086 
7087 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
7088 					  void *ras_error_status)
7089 {
7090 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
7091 	uint32_t sec_count = 0, ded_count = 0;
7092 	uint32_t i, j, k;
7093 	uint32_t reg_value;
7094 
7095 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7096 		return;
7097 
7098 	err_data->ue_count = 0;
7099 	err_data->ce_count = 0;
7100 
7101 	mutex_lock(&adev->grbm_idx_mutex);
7102 
7103 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7104 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7105 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7106 				amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
7107 				reg_value =
7108 					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7109 				if (reg_value)
7110 					gfx_v9_0_ras_error_count(adev,
7111 						&gfx_v9_0_edc_counter_regs[i],
7112 						j, k, reg_value,
7113 						&sec_count, &ded_count);
7114 			}
7115 		}
7116 	}
7117 
7118 	err_data->ce_count += sec_count;
7119 	err_data->ue_count += ded_count;
7120 
7121 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7122 	mutex_unlock(&adev->grbm_idx_mutex);
7123 
7124 	gfx_v9_0_query_utc_edc_status(adev, err_data);
7125 }
7126 
7127 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
7128 {
7129 	const unsigned int cp_coher_cntl =
7130 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
7131 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
7132 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
7133 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
7134 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
7135 
7136 	/* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
7137 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
7138 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
7139 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
7140 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
7141 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
7142 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
7143 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
7144 }
7145 
7146 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
7147 					uint32_t pipe, bool enable)
7148 {
7149 	struct amdgpu_device *adev = ring->adev;
7150 	uint32_t val;
7151 	uint32_t wcl_cs_reg;
7152 
7153 	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
7154 	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
7155 
7156 	switch (pipe) {
7157 	case 0:
7158 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
7159 		break;
7160 	case 1:
7161 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
7162 		break;
7163 	case 2:
7164 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
7165 		break;
7166 	case 3:
7167 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
7168 		break;
7169 	default:
7170 		DRM_DEBUG("invalid pipe %d\n", pipe);
7171 		return;
7172 	}
7173 
7174 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
7175 
7176 }
7177 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
7178 {
7179 	struct amdgpu_device *adev = ring->adev;
7180 	uint32_t val;
7181 	int i;
7182 
7183 
7184 	/* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
7185 	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
7186 	 * around 25% of gpu resources.
7187 	 */
7188 	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
7189 	amdgpu_ring_emit_wreg(ring,
7190 			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
7191 			      val);
7192 
7193 	/* Restrict waves for normal/low priority compute queues as well
7194 	 * to get best QoS for high priority compute jobs.
7195 	 *
7196 	 * amdgpu controls only 1st ME(0-3 CS pipes).
7197 	 */
7198 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
7199 		if (i != ring->pipe)
7200 			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
7201 
7202 	}
7203 }
7204 
7205 static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
7206 {
7207 	/* Header itself is a NOP packet */
7208 	if (num_nop == 1) {
7209 		amdgpu_ring_write(ring, ring->funcs->nop);
7210 		return;
7211 	}
7212 
7213 	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/
7214 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
7215 
7216 	/* Header is at index 0, followed by num_nops - 1 NOP packet's */
7217 	amdgpu_ring_insert_nop(ring, num_nop - 1);
7218 }
7219 
7220 static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
7221 {
7222 	struct amdgpu_device *adev = ring->adev;
7223 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
7224 	struct amdgpu_ring *kiq_ring = &kiq->ring;
7225 	unsigned long flags;
7226 	u32 tmp;
7227 	int r;
7228 
7229 	if (amdgpu_sriov_vf(adev))
7230 		return -EINVAL;
7231 
7232 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
7233 		return -EINVAL;
7234 
7235 	spin_lock_irqsave(&kiq->ring_lock, flags);
7236 
7237 	if (amdgpu_ring_alloc(kiq_ring, 5)) {
7238 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7239 		return -ENOMEM;
7240 	}
7241 
7242 	tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
7243 	gfx_v9_0_ring_emit_wreg(kiq_ring,
7244 				 SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
7245 	amdgpu_ring_commit(kiq_ring);
7246 
7247 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7248 
7249 	r = amdgpu_ring_test_ring(kiq_ring);
7250 	if (r)
7251 		return r;
7252 
7253 	if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
7254 		return -ENOMEM;
7255 	gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
7256 				 ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
7257 	gfx_v9_0_ring_emit_reg_wait(ring,
7258 				    SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
7259 	gfx_v9_0_ring_emit_wreg(ring,
7260 				SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
7261 
7262 	return amdgpu_ring_test_ring(ring);
7263 }
7264 
7265 static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
7266 			      unsigned int vmid)
7267 {
7268 	struct amdgpu_device *adev = ring->adev;
7269 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
7270 	struct amdgpu_ring *kiq_ring = &kiq->ring;
7271 	unsigned long flags;
7272 	int i, r;
7273 
7274 	if (amdgpu_sriov_vf(adev))
7275 		return -EINVAL;
7276 
7277 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
7278 		return -EINVAL;
7279 
7280 	spin_lock_irqsave(&kiq->ring_lock, flags);
7281 
7282 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
7283 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7284 		return -ENOMEM;
7285 	}
7286 
7287 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
7288 				   0, 0);
7289 	amdgpu_ring_commit(kiq_ring);
7290 
7291 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7292 
7293 	r = amdgpu_ring_test_ring(kiq_ring);
7294 	if (r)
7295 		return r;
7296 
7297 	/* make sure dequeue is complete*/
7298 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
7299 	mutex_lock(&adev->srbm_mutex);
7300 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
7301 	for (i = 0; i < adev->usec_timeout; i++) {
7302 		if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
7303 			break;
7304 		udelay(1);
7305 	}
7306 	if (i >= adev->usec_timeout)
7307 		r = -ETIMEDOUT;
7308 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7309 	mutex_unlock(&adev->srbm_mutex);
7310 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
7311 	if (r) {
7312 		dev_err(adev->dev, "fail to wait on hqd deactive\n");
7313 		return r;
7314 	}
7315 
7316 	r = gfx_v9_0_kcq_init_queue(ring, true);
7317 	if (r) {
7318 		dev_err(adev->dev, "fail to init kcq\n");
7319 		return r;
7320 	}
7321 	spin_lock_irqsave(&kiq->ring_lock, flags);
7322 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
7323 	if (r) {
7324 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
7325 		return -ENOMEM;
7326 	}
7327 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
7328 	amdgpu_ring_commit(kiq_ring);
7329 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
7330 	r = amdgpu_ring_test_ring(kiq_ring);
7331 	if (r) {
7332 		DRM_ERROR("fail to remap queue\n");
7333 		return r;
7334 	}
7335 	return amdgpu_ring_test_ring(ring);
7336 }
7337 
7338 static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
7339 {
7340 	struct amdgpu_device *adev = ip_block->adev;
7341 	uint32_t i, j, k, reg, index = 0;
7342 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7343 
7344 	if (!adev->gfx.ip_dump_core)
7345 		return;
7346 
7347 	for (i = 0; i < reg_count; i++)
7348 		drm_printf(p, "%-50s \t 0x%08x\n",
7349 			   gc_reg_list_9[i].reg_name,
7350 			   adev->gfx.ip_dump_core[i]);
7351 
7352 	/* print compute queue registers for all instances */
7353 	if (!adev->gfx.ip_dump_compute_queues)
7354 		return;
7355 
7356 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7357 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
7358 		   adev->gfx.mec.num_mec,
7359 		   adev->gfx.mec.num_pipe_per_mec,
7360 		   adev->gfx.mec.num_queue_per_pipe);
7361 
7362 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7363 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7364 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7365 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
7366 				for (reg = 0; reg < reg_count; reg++) {
7367 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7368 						drm_printf(p, "%-50s \t 0x%08x\n",
7369 							   "mmCP_MEC_ME2_HEADER_DUMP",
7370 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7371 					else
7372 						drm_printf(p, "%-50s \t 0x%08x\n",
7373 							   gc_cp_reg_list_9[reg].reg_name,
7374 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7375 				}
7376 				index += reg_count;
7377 			}
7378 		}
7379 	}
7380 
7381 }
7382 
7383 static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
7384 {
7385 	struct amdgpu_device *adev = ip_block->adev;
7386 	uint32_t i, j, k, reg, index = 0;
7387 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
7388 
7389 	if (!adev->gfx.ip_dump_core || !adev->gfx.num_gfx_rings)
7390 		return;
7391 
7392 	amdgpu_gfx_off_ctrl(adev, false);
7393 	for (i = 0; i < reg_count; i++)
7394 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_9[i]));
7395 	amdgpu_gfx_off_ctrl(adev, true);
7396 
7397 	/* dump compute queue registers for all instances */
7398 	if (!adev->gfx.ip_dump_compute_queues)
7399 		return;
7400 
7401 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
7402 	amdgpu_gfx_off_ctrl(adev, false);
7403 	mutex_lock(&adev->srbm_mutex);
7404 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7405 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7406 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7407 				/* ME0 is for GFX so start from 1 for CP */
7408 				soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
7409 
7410 				for (reg = 0; reg < reg_count; reg++) {
7411 					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
7412 						adev->gfx.ip_dump_compute_queues[index + reg] =
7413 							RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
7414 					else
7415 						adev->gfx.ip_dump_compute_queues[index + reg] =
7416 							RREG32(SOC15_REG_ENTRY_OFFSET(
7417 								       gc_cp_reg_list_9[reg]));
7418 				}
7419 				index += reg_count;
7420 			}
7421 		}
7422 	}
7423 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
7424 	mutex_unlock(&adev->srbm_mutex);
7425 	amdgpu_gfx_off_ctrl(adev, true);
7426 
7427 }
7428 
7429 static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
7430 {
7431 	/* Emit the cleaner shader */
7432 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
7433 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
7434 }
7435 
7436 static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
7437 {
7438 	struct amdgpu_device *adev = ring->adev;
7439 	struct amdgpu_ip_block *gfx_block =
7440 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7441 
7442 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
7443 
7444 	/* Raven and PCO APUs seem to have stability issues
7445 	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
7446 	 * submission and allow again afterwards.
7447 	 */
7448 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7449 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
7450 }
7451 
7452 static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
7453 {
7454 	struct amdgpu_device *adev = ring->adev;
7455 	struct amdgpu_ip_block *gfx_block =
7456 		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
7457 
7458 	/* Raven and PCO APUs seem to have stability issues
7459 	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
7460 	 * submission and allow again afterwards.
7461 	 */
7462 	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
7463 		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
7464 
7465 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
7466 }
7467 
7468 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
7469 	.name = "gfx_v9_0",
7470 	.early_init = gfx_v9_0_early_init,
7471 	.late_init = gfx_v9_0_late_init,
7472 	.sw_init = gfx_v9_0_sw_init,
7473 	.sw_fini = gfx_v9_0_sw_fini,
7474 	.hw_init = gfx_v9_0_hw_init,
7475 	.hw_fini = gfx_v9_0_hw_fini,
7476 	.suspend = gfx_v9_0_suspend,
7477 	.resume = gfx_v9_0_resume,
7478 	.is_idle = gfx_v9_0_is_idle,
7479 	.wait_for_idle = gfx_v9_0_wait_for_idle,
7480 	.soft_reset = gfx_v9_0_soft_reset,
7481 	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
7482 	.set_powergating_state = gfx_v9_0_set_powergating_state,
7483 	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
7484 	.dump_ip_state = gfx_v9_ip_dump,
7485 	.print_ip_state = gfx_v9_ip_print,
7486 };
7487 
7488 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
7489 	.type = AMDGPU_RING_TYPE_GFX,
7490 	.align_mask = 0xff,
7491 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7492 	.support_64bit_ptrs = true,
7493 	.secure_submission_supported = true,
7494 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
7495 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
7496 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
7497 	.emit_frame_size = /* totally 242 maximum if 16 IBs */
7498 		5 +  /* COND_EXEC */
7499 		7 +  /* PIPELINE_SYNC */
7500 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7501 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7502 		2 + /* VM_FLUSH */
7503 		8 +  /* FENCE for VM_FLUSH */
7504 		20 + /* GDS switch */
7505 		4 + /* double SWITCH_BUFFER,
7506 		       the first COND_EXEC jump to the place just
7507 			   prior to this double SWITCH_BUFFER  */
7508 		5 + /* COND_EXEC */
7509 		7 +	 /*	HDP_flush */
7510 		4 +	 /*	VGT_flush */
7511 		14 + /*	CE_META */
7512 		31 + /*	DE_META */
7513 		3 + /* CNTX_CTRL */
7514 		5 + /* HDP_INVL */
7515 		8 + 8 + /* FENCE x2 */
7516 		2 + /* SWITCH_BUFFER */
7517 		7 + /* gfx_v9_0_emit_mem_sync */
7518 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7519 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7520 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7521 	.emit_fence = gfx_v9_0_ring_emit_fence,
7522 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7523 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7524 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7525 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7526 	.test_ring = gfx_v9_0_ring_test_ring,
7527 	.insert_nop = gfx_v9_ring_insert_nop,
7528 	.pad_ib = amdgpu_ring_generic_pad_ib,
7529 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7530 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7531 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7532 	.preempt_ib = gfx_v9_0_ring_preempt_ib,
7533 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7534 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7535 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7536 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7537 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7538 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7539 	.reset = gfx_v9_0_reset_kgq,
7540 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7541 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7542 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7543 };
7544 
7545 static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
7546 	.type = AMDGPU_RING_TYPE_GFX,
7547 	.align_mask = 0xff,
7548 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7549 	.support_64bit_ptrs = true,
7550 	.secure_submission_supported = true,
7551 	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
7552 	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
7553 	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
7554 	.emit_frame_size = /* totally 242 maximum if 16 IBs */
7555 		5 +  /* COND_EXEC */
7556 		7 +  /* PIPELINE_SYNC */
7557 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7558 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7559 		2 + /* VM_FLUSH */
7560 		8 +  /* FENCE for VM_FLUSH */
7561 		20 + /* GDS switch */
7562 		4 + /* double SWITCH_BUFFER,
7563 		     * the first COND_EXEC jump to the place just
7564 		     * prior to this double SWITCH_BUFFER
7565 		     */
7566 		5 + /* COND_EXEC */
7567 		7 +	 /*	HDP_flush */
7568 		4 +	 /*	VGT_flush */
7569 		14 + /*	CE_META */
7570 		31 + /*	DE_META */
7571 		3 + /* CNTX_CTRL */
7572 		5 + /* HDP_INVL */
7573 		8 + 8 + /* FENCE x2 */
7574 		2 + /* SWITCH_BUFFER */
7575 		7 + /* gfx_v9_0_emit_mem_sync */
7576 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7577 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
7578 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
7579 	.emit_fence = gfx_v9_0_ring_emit_fence,
7580 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7581 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7582 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7583 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7584 	.test_ring = gfx_v9_0_ring_test_ring,
7585 	.test_ib = gfx_v9_0_ring_test_ib,
7586 	.insert_nop = gfx_v9_ring_insert_nop,
7587 	.pad_ib = amdgpu_ring_generic_pad_ib,
7588 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
7589 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
7590 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
7591 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
7592 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7593 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7594 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7595 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7596 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7597 	.patch_cntl = gfx_v9_0_ring_patch_cntl,
7598 	.patch_de = gfx_v9_0_ring_patch_de_meta,
7599 	.patch_ce = gfx_v9_0_ring_patch_ce_meta,
7600 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7601 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
7602 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
7603 };
7604 
7605 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7606 	.type = AMDGPU_RING_TYPE_COMPUTE,
7607 	.align_mask = 0xff,
7608 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7609 	.support_64bit_ptrs = true,
7610 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7611 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7612 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7613 	.emit_frame_size =
7614 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7615 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7616 		5 + /* hdp invalidate */
7617 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7618 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7619 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7620 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7621 		7 + /* gfx_v9_0_emit_mem_sync */
7622 		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
7623 		15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
7624 		2, /* gfx_v9_0_ring_emit_cleaner_shader */
7625 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7626 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
7627 	.emit_fence = gfx_v9_0_ring_emit_fence,
7628 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
7629 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
7630 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
7631 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
7632 	.test_ring = gfx_v9_0_ring_test_ring,
7633 	.test_ib = gfx_v9_0_ring_test_ib,
7634 	.insert_nop = gfx_v9_ring_insert_nop,
7635 	.pad_ib = amdgpu_ring_generic_pad_ib,
7636 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7637 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7638 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7639 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
7640 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
7641 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
7642 	.reset = gfx_v9_0_reset_kcq,
7643 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
7644 	.begin_use = gfx_v9_0_ring_begin_use_compute,
7645 	.end_use = gfx_v9_0_ring_end_use_compute,
7646 };
7647 
7648 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7649 	.type = AMDGPU_RING_TYPE_KIQ,
7650 	.align_mask = 0xff,
7651 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7652 	.support_64bit_ptrs = true,
7653 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
7654 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
7655 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
7656 	.emit_frame_size =
7657 		20 + /* gfx_v9_0_ring_emit_gds_switch */
7658 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
7659 		5 + /* hdp invalidate */
7660 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
7661 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7662 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7663 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7664 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
7665 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
7666 	.test_ring = gfx_v9_0_ring_test_ring,
7667 	.insert_nop = amdgpu_ring_insert_nop,
7668 	.pad_ib = amdgpu_ring_generic_pad_ib,
7669 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
7670 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
7671 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
7672 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
7673 };
7674 
7675 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
7676 {
7677 	int i;
7678 
7679 	adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
7680 
7681 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7682 		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
7683 
7684 	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
7685 		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
7686 			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
7687 	}
7688 
7689 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7690 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
7691 }
7692 
7693 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
7694 	.set = gfx_v9_0_set_eop_interrupt_state,
7695 	.process = gfx_v9_0_eop_irq,
7696 };
7697 
7698 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
7699 	.set = gfx_v9_0_set_priv_reg_fault_state,
7700 	.process = gfx_v9_0_priv_reg_irq,
7701 };
7702 
7703 static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
7704 	.set = gfx_v9_0_set_bad_op_fault_state,
7705 	.process = gfx_v9_0_bad_op_irq,
7706 };
7707 
7708 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
7709 	.set = gfx_v9_0_set_priv_inst_fault_state,
7710 	.process = gfx_v9_0_priv_inst_irq,
7711 };
7712 
7713 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
7714 	.set = gfx_v9_0_set_cp_ecc_error_state,
7715 	.process = amdgpu_gfx_cp_ecc_error_irq,
7716 };
7717 
7718 
7719 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
7720 {
7721 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7722 	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
7723 
7724 	adev->gfx.priv_reg_irq.num_types = 1;
7725 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
7726 
7727 	adev->gfx.bad_op_irq.num_types = 1;
7728 	adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;
7729 
7730 	adev->gfx.priv_inst_irq.num_types = 1;
7731 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
7732 
7733 	adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
7734 	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
7735 }
7736 
7737 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
7738 {
7739 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7740 	case IP_VERSION(9, 0, 1):
7741 	case IP_VERSION(9, 2, 1):
7742 	case IP_VERSION(9, 4, 0):
7743 	case IP_VERSION(9, 2, 2):
7744 	case IP_VERSION(9, 1, 0):
7745 	case IP_VERSION(9, 4, 1):
7746 	case IP_VERSION(9, 3, 0):
7747 	case IP_VERSION(9, 4, 2):
7748 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7749 		break;
7750 	default:
7751 		break;
7752 	}
7753 }
7754 
7755 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7756 {
7757 	/* init asci gds info */
7758 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7759 	case IP_VERSION(9, 0, 1):
7760 	case IP_VERSION(9, 2, 1):
7761 	case IP_VERSION(9, 4, 0):
7762 		adev->gds.gds_size = 0x10000;
7763 		break;
7764 	case IP_VERSION(9, 2, 2):
7765 	case IP_VERSION(9, 1, 0):
7766 	case IP_VERSION(9, 4, 1):
7767 		adev->gds.gds_size = 0x1000;
7768 		break;
7769 	case IP_VERSION(9, 4, 2):
7770 		/* aldebaran removed all the GDS internal memory,
7771 		 * only support GWS opcode in kernel, like barrier
7772 		 * semaphore.etc */
7773 		adev->gds.gds_size = 0;
7774 		break;
7775 	default:
7776 		adev->gds.gds_size = 0x10000;
7777 		break;
7778 	}
7779 
7780 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
7781 	case IP_VERSION(9, 0, 1):
7782 	case IP_VERSION(9, 4, 0):
7783 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7784 		break;
7785 	case IP_VERSION(9, 2, 1):
7786 		adev->gds.gds_compute_max_wave_id = 0x27f;
7787 		break;
7788 	case IP_VERSION(9, 2, 2):
7789 	case IP_VERSION(9, 1, 0):
7790 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7791 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7792 		else
7793 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7794 		break;
7795 	case IP_VERSION(9, 4, 1):
7796 		adev->gds.gds_compute_max_wave_id = 0xfff;
7797 		break;
7798 	case IP_VERSION(9, 4, 2):
7799 		/* deprecated for Aldebaran, no usage at all */
7800 		adev->gds.gds_compute_max_wave_id = 0;
7801 		break;
7802 	default:
7803 		/* this really depends on the chip */
7804 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7805 		break;
7806 	}
7807 
7808 	adev->gds.gws_size = 64;
7809 	adev->gds.oa_size = 16;
7810 }
7811 
7812 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7813 						 u32 bitmap)
7814 {
7815 	u32 data;
7816 
7817 	if (!bitmap)
7818 		return;
7819 
7820 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7821 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7822 
7823 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7824 }
7825 
7826 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7827 {
7828 	u32 data, mask;
7829 
7830 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7831 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7832 
7833 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7834 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7835 
7836 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7837 
7838 	return (~data) & mask;
7839 }
7840 
7841 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7842 				 struct amdgpu_cu_info *cu_info)
7843 {
7844 	int i, j, k, counter, active_cu_number = 0;
7845 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7846 	unsigned disable_masks[4 * 4];
7847 
7848 	if (!adev || !cu_info)
7849 		return -EINVAL;
7850 
7851 	/*
7852 	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
7853 	 */
7854 	if (adev->gfx.config.max_shader_engines *
7855 		adev->gfx.config.max_sh_per_se > 16)
7856 		return -EINVAL;
7857 
7858 	amdgpu_gfx_parse_disable_cu(disable_masks,
7859 				    adev->gfx.config.max_shader_engines,
7860 				    adev->gfx.config.max_sh_per_se);
7861 
7862 	mutex_lock(&adev->grbm_idx_mutex);
7863 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7864 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7865 			mask = 1;
7866 			ao_bitmap = 0;
7867 			counter = 0;
7868 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
7869 			gfx_v9_0_set_user_cu_inactive_bitmap(
7870 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7871 			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7872 
7873 			/*
7874 			 * The bitmap(and ao_cu_bitmap) in cu_info structure is
7875 			 * 4x4 size array, and it's usually suitable for Vega
7876 			 * ASICs which has 4*2 SE/SH layout.
7877 			 * But for Arcturus, SE/SH layout is changed to 8*1.
7878 			 * To mostly reduce the impact, we make it compatible
7879 			 * with current bitmap array as below:
7880 			 *    SE4,SH0 --> bitmap[0][1]
7881 			 *    SE5,SH0 --> bitmap[1][1]
7882 			 *    SE6,SH0 --> bitmap[2][1]
7883 			 *    SE7,SH0 --> bitmap[3][1]
7884 			 */
7885 			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
7886 
7887 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
7888 				if (bitmap & mask) {
7889 					if (counter < adev->gfx.config.max_cu_per_sh)
7890 						ao_bitmap |= mask;
7891 					counter ++;
7892 				}
7893 				mask <<= 1;
7894 			}
7895 			active_cu_number += counter;
7896 			if (i < 2 && j < 2)
7897 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7898 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7899 		}
7900 	}
7901 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7902 	mutex_unlock(&adev->grbm_idx_mutex);
7903 
7904 	cu_info->number = active_cu_number;
7905 	cu_info->ao_cu_mask = ao_cu_mask;
7906 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7907 
7908 	return 0;
7909 }
7910 
7911 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7912 {
7913 	.type = AMD_IP_BLOCK_TYPE_GFX,
7914 	.major = 9,
7915 	.minor = 0,
7916 	.rev = 0,
7917 	.funcs = &gfx_v9_0_ip_funcs,
7918 };
7919