/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_cleaner_shader.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

#define GFX11_NUM_GFX_RINGS		1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1

#define regCP_GFX_MQD_CONTROL_DEFAULT                                             0x00000100
#define regCP_GFX_HQD_VMID_DEFAULT                                                0x00000000
#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT                                      0x00000000
#define regCP_GFX_HQD_QUANTUM_DEFAULT                                             0x00000a01
#define regCP_GFX_HQD_CNTL_DEFAULT                                                0x00a00000
#define regCP_RB_DOORBELL_CONTROL_DEFAULT                                         0x00000000
#define regCP_GFX_HQD_RPTR_DEFAULT                                                0x00000000

#define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000006
#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
#define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
#define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
#define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
#define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0be05501
#define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00300000

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");

static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
	/* compute registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};

static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
	/* gfx queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};

static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

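/* Default SH_MEM_CONFIG: 64-bit address mode, unaligned memory access
 * allowed, and INITIAL_INST_PREFETCH set to 3.
 */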
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

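/* Emit a PACKET3_SET_RESOURCES on the KIQ ring: hand the queue mask to the
 * firmware scheduler and pass the cleaner shader MC address (256-byte
 * aligned, hence the >> 8) used to scrub shader state between queue users.
 */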
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

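/* Emit a PACKET3_MAP_QUEUES on the KIQ ring to map a gfx, compute or MES
 * queue into the firmware scheduler.  The ME and engine-select fields are
 * derived from the ring type: compute uses ME1/engine 0, gfx uses
 * ME0/engine 4 and MES uses ME2/engine 5.
 */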
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

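/* Emit a PACKET3_UNMAP_QUEUES on the KIQ ring.  When MES is enabled and the
 * KIQ ring is not ready, the unmap is routed through the MES legacy queue
 * interface instead.  For PREEMPT_QUEUES_NO_UNMAP the trailing dwords carry
 * the address and fence sequence number to signal.
 */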
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action,
					      gpu_addr, seq, 0);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	soc15_program_register_sequence(adev,
					golden_settings_gc_11_0,
					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

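/* Emit a PACKET3_WAIT_REG_MEM: poll a register or a DWORD-aligned memory
 * location until (value & mask) == ref, re-checking at the given poll
 * interval.  mem_space selects memory (1) vs. register (0) polling.
 */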
static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization up to 0x3ffe NOP dwords in one packet; any
	 * remainder is emitted one NOP at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}

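/* Basic ring test: write 0xCAFEDEAD to SCRATCH_REG0, submit a packet that
 * overwrites it with 0xDEADBEEF, then poll until the new value reads back
 * or the timeout expires.
 */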
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

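/* IB test: build a small indirect buffer containing a WRITE_DATA packet
 * that stores 0xDEADBEEF to a writeback slot, schedule it on the ring,
 * wait on the fence, then verify the value landed in memory.
 */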
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't support indirect buffers yet */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	cpu_ptr = &adev->wb.wb[index];

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

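/* CP firmware-managed gfx queue state shadowing: only advertised when the
 * ME/PFP/MEC firmware on gfx 11.0.0/11.0.2/11.0.3 is new enough, and only
 * under SR-IOV; bare metal keeps it disabled.
 */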
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}

static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[25];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_pfp.bin", ucode_prefix);
	if (err)
		goto out;
	/* check the pfp fw header version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_me.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
		    adev->pdev->revision == 0xCE)
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/gc_11_0_0_rlc_1.bin");
		else if (amdgpu_is_kicker_fw(adev))
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
		else
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
		err = adev->gfx.imu.funcs->init_microcode(adev);
		if (err)
			DRM_ERROR("Failed to init imu firmware!\n");
		return err;
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

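/* Compute the clear-state buffer size in dwords: the begin-clear-state and
 * context-control preamble, every SECT_CONTEXT extent from the gfx11
 * clear-state table, the PA_SC_TILE_STEERING_OVERRIDE write, and the
 * trailing end-clear-state/clear-state packets.
 */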
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
	u32 count = 0;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count = amdgpu_gfx_csb_preamble_start(buffer);
	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);

	ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_gfx_csb_preamble_end(buffer, count);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

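/* Allocate and zero the MEC HPD EOP buffer in GTT: one GFX11_MEC_HPD_SIZE
 * chunk per acquired compute ring, used by the CP for the hardware queues'
 * EOP (end of pipe) storage.
 */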
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

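/* Indirect SQ register access: program SQ_IND_INDEX with the wave (and
 * optionally thread) selector plus the register index, then read the
 * value(s) back through SQ_IND_DATA.  wave_read_regs uses AUTO_INCR to
 * stream consecutive registers.
 */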
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	/* for gfx */
	shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
	shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
	shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
	shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
	/* for compute */
	shadow_info->eop_size = GFX11_MEC_HPD_SIZE;
	shadow_info->eop_alignment = 256;
}

static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info,
					 bool skip_check)
{
	if (adev->gfx.cp_gfx_shadow || skip_check) {
		gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
	.get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	struct amdgpu_ring *ring;
	unsigned int irq_type;
	unsigned int hw_prio;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	if (adev->gfx.disable_kq) {
		ring->no_scheduler = true;
		ring->no_user_submission = true;
	}

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
		AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

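/* Walk the RLC table of contents and record each firmware's offset and
 * size (stored in dwords in the TOC, hence the * 4) into the
 * rlc_autoload_info lookup table.
 */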
static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
			(ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* The TOC offsets may be padded for alignment, so make sure
	 * total_size reaches at least the end of the last entry */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

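/* Copy one firmware image into its TOC-assigned slot in the RLC autoload
 * buffer, zero-padding up to the slot size, and set the matching bit in
 * the 64-bit autoload enable mask (except for the RS64 PFP and ME images,
 * whose bits are deliberately left clear).
 */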
1261 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1262 					      SOC21_FIRMWARE_ID id,
1263 			    		      const void *fw_data,
1264 					      uint32_t fw_size,
1265 					      uint32_t *fw_autoload_mask)
1266 {
1267 	uint32_t toc_offset;
1268 	uint32_t toc_fw_size;
1269 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1270 
1271 	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
1272 		return;
1273 
1274 	toc_offset = rlc_autoload_info[id].offset;
1275 	toc_fw_size = rlc_autoload_info[id].size;
1276 
1277 	if (fw_size == 0)
1278 		fw_size = toc_fw_size;
1279 
1280 	if (fw_size > toc_fw_size)
1281 		fw_size = toc_fw_size;
1282 
1283 	memcpy(ptr + toc_offset, fw_data, fw_size);
1284 
1285 	if (fw_size < toc_fw_size)
1286 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1287 
1288 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
1289 		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
1290 }
1291 
1292 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
1293 							uint32_t *fw_autoload_mask)
1294 {
1295 	void *data;
1296 	uint32_t size;
1297 	uint64_t *toc_ptr;
1298 
1299 	*(uint64_t *)fw_autoload_mask |= 0x1;
1300 
1301 	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);
1302 
1303 	data = adev->psp.toc.start_addr;
1304 	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;
1305 
1306 	toc_ptr = (uint64_t *)data + size / 8 - 1;
1307 	*toc_ptr = *(uint64_t *)fw_autoload_mask;
1308 
1309 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
1310 					data, size, fw_autoload_mask);
1311 }
1312 
1313 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
1314 							uint32_t *fw_autoload_mask)
1315 {
1316 	const __le32 *fw_data;
1317 	uint32_t fw_size;
1318 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1319 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1320 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1321 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1322 	uint16_t version_major, version_minor;
1323 
1324 	if (adev->gfx.rs64_enable) {
1325 		/* pfp ucode */
1326 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1327 			adev->gfx.pfp_fw->data;
1328 		/* instruction */
1329 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1330 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1331 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1332 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
1333 						fw_data, fw_size, fw_autoload_mask);
1334 		/* data */
1335 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1336 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1337 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1338 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
1339 						fw_data, fw_size, fw_autoload_mask);
1340 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
1341 						fw_data, fw_size, fw_autoload_mask);
1342 		/* me ucode */
1343 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1344 			adev->gfx.me_fw->data;
1345 		/* instruction */
1346 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1347 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1348 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1349 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
1350 						fw_data, fw_size, fw_autoload_mask);
1351 		/* data */
1352 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1353 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1354 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1355 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
1356 						fw_data, fw_size, fw_autoload_mask);
1357 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
1358 						fw_data, fw_size, fw_autoload_mask);
1359 		/* mec ucode */
1360 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1361 			adev->gfx.mec_fw->data;
1362 		/* instruction */
1363 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1364 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1365 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1366 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
1367 						fw_data, fw_size, fw_autoload_mask);
1368 		/* data */
1369 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1370 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1371 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1372 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
1373 						fw_data, fw_size, fw_autoload_mask);
1374 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
1375 						fw_data, fw_size, fw_autoload_mask);
1376 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
1377 						fw_data, fw_size, fw_autoload_mask);
1378 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
1379 						fw_data, fw_size, fw_autoload_mask);
1380 	} else {
1381 		/* pfp ucode */
1382 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1383 			adev->gfx.pfp_fw->data;
1384 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1385 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1386 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1387 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
1388 						fw_data, fw_size, fw_autoload_mask);
1389 
1390 		/* me ucode */
1391 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1392 			adev->gfx.me_fw->data;
1393 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1394 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1395 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1396 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
1397 						fw_data, fw_size, fw_autoload_mask);
1398 
1399 		/* mec ucode */
1400 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1401 			adev->gfx.mec_fw->data;
1402 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1403 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1404 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1405 			cp_hdr->jt_size * 4;
1406 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
1407 						fw_data, fw_size, fw_autoload_mask);
1408 	}
1409 
1410 	/* rlc ucode */
1411 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1412 		adev->gfx.rlc_fw->data;
1413 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1414 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1415 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1416 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
1417 					fw_data, fw_size, fw_autoload_mask);
1418 
1419 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1420 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1421 	if (version_major == 2) {
1422 		if (version_minor >= 2) {
1423 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1424 
1425 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1426 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1427 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1428 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
1429 					fw_data, fw_size, fw_autoload_mask);
1430 
1431 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1432 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1433 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1434 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
1435 					fw_data, fw_size, fw_autoload_mask);
1436 		}
1437 	}
1438 }
1439 
1440 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
1441 							uint32_t *fw_autoload_mask)
1442 {
1443 	const __le32 *fw_data;
1444 	uint32_t fw_size;
1445 	const struct sdma_firmware_header_v2_0 *sdma_hdr;
1446 
1447 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
1448 		adev->sdma.instance[0].fw->data;
1449 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1450 			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
1451 	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
1452 
1453 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1454 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);
1455 
1456 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1457 			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
1458 	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
1459 
1460 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1461 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
1462 }
1463 
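/*
 * The RS64 MES runs two pipes; each pipe's instruction image and its
 * data/stack image are staged under their own SOC21 firmware IDs so
 * the bootloader can place them independently.
 */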
1464 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
1465 							uint32_t *fw_autoload_mask)
1466 {
1467 	const __le32 *fw_data;
1468 	unsigned fw_size;
1469 	const struct mes_firmware_header_v1_0 *mes_hdr;
1470 	int pipe, ucode_id, data_id;
1471 
1472 	for (pipe = 0; pipe < 2; pipe++) {
1473 		if (pipe == 0) {
1474 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
1475 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
1476 		} else {
1477 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
1478 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
1479 		}
1480 
1481 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1482 			adev->mes.fw[pipe]->data;
1483 
1484 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1485 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1486 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1487 
1488 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1489 				ucode_id, fw_data, fw_size, fw_autoload_mask);
1490 
1491 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1492 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1493 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1494 
1495 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1496 				data_id, fw_data, fw_size, fw_autoload_mask);
1497 	}
1498 }
1499 
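/*
 * RLC backdoor autoload: stage the SDMA, GFX, MES and TOC images in a
 * single GPU buffer, point the RLC bootloader at the RLC_G image inside
 * that buffer, then load/setup/start the IMU and disable GPA mode. The
 * numbered comments below mark the autoload sequence steps.
 */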
1500 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1501 {
1502 	uint32_t rlc_g_offset, rlc_g_size;
1503 	uint64_t gpu_addr;
1504 	uint32_t autoload_fw_id[2];
1505 
1506 	memset(autoload_fw_id, 0, sizeof(autoload_fw_id));
1507 
1508 	/* RLC autoload sequence 2: copy ucode */
1509 	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
1510 	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
1511 	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
1512 	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);
1513 
1514 	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
1515 	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
1516 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
1517 
1518 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1519 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1520 
1521 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1522 
1523 	/* RLC autoload sequence 3: load IMU fw */
1524 	if (adev->gfx.imu.funcs->load_microcode)
1525 		adev->gfx.imu.funcs->load_microcode(adev);
1526 	/* RLC autoload sequence 4: init IMU fw */
1527 	if (adev->gfx.imu.funcs->setup_imu)
1528 		adev->gfx.imu.funcs->setup_imu(adev);
1529 	if (adev->gfx.imu.funcs->start_imu)
1530 		adev->gfx.imu.funcs->start_imu(adev);
1531 
1532 	/* RLC autoload sequence 5: disable gpa mode */
1533 	gfx_v11_0_disable_gpa_mode(adev);
1534 
1535 	return 0;
1536 }
1537 
1538 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
1539 {
1540 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
1541 	uint32_t *ptr;
1542 	uint32_t inst;
1543 
1544 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
1545 	if (!ptr) {
1546 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1547 		adev->gfx.ip_dump_core = NULL;
1548 	} else {
1549 		adev->gfx.ip_dump_core = ptr;
1550 	}
1551 
1552 	/* Allocate memory for compute queue registers for all the instances */
1553 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
1554 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1555 		adev->gfx.mec.num_queue_per_pipe;
1556 
1557 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1558 	if (!ptr) {
1559 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1560 		adev->gfx.ip_dump_compute_queues = NULL;
1561 	} else {
1562 		adev->gfx.ip_dump_compute_queues = ptr;
1563 	}
1564 
1565 	/* Allocate memory for gfx queue registers for all the instances */
1566 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
1567 	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
1568 		adev->gfx.me.num_queue_per_pipe;
1569 
1570 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1571 	if (!ptr) {
1572 		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
1573 		adev->gfx.ip_dump_gfx_queues = NULL;
1574 	} else {
1575 		adev->gfx.ip_dump_gfx_queues = ptr;
1576 	}
1577 }
1578 
1579 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
1580 {
1581 	int i, j, k, r, ring_id;
1582 	int xcc_id = 0;
1583 	struct amdgpu_device *adev = ip_block->adev;
1584 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
1585 
1586 	INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
1587 
1588 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1589 	case IP_VERSION(11, 0, 0):
1590 	case IP_VERSION(11, 0, 1):
1591 	case IP_VERSION(11, 0, 2):
1592 	case IP_VERSION(11, 0, 3):
1593 	case IP_VERSION(11, 0, 4):
1594 	case IP_VERSION(11, 5, 0):
1595 	case IP_VERSION(11, 5, 1):
1596 	case IP_VERSION(11, 5, 2):
1597 	case IP_VERSION(11, 5, 3):
1598 		adev->gfx.me.num_me = 1;
1599 		adev->gfx.me.num_pipe_per_me = 1;
1600 		adev->gfx.me.num_queue_per_pipe = 2;
1601 		adev->gfx.mec.num_mec = 1;
1602 		adev->gfx.mec.num_pipe_per_mec = 4;
1603 		adev->gfx.mec.num_queue_per_pipe = 4;
1604 		break;
1605 	default:
1606 		adev->gfx.me.num_me = 1;
1607 		adev->gfx.me.num_pipe_per_me = 1;
1608 		adev->gfx.me.num_queue_per_pipe = 1;
1609 		adev->gfx.mec.num_mec = 1;
1610 		adev->gfx.mec.num_pipe_per_mec = 4;
1611 		adev->gfx.mec.num_queue_per_pipe = 8;
1612 		break;
1613 	}
1614 
1615 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1616 	case IP_VERSION(11, 0, 0):
1617 	case IP_VERSION(11, 0, 2):
1618 	case IP_VERSION(11, 0, 3):
1619 		if (!adev->gfx.disable_uq &&
1620 		    adev->gfx.me_fw_version  >= 2420 &&
1621 		    adev->gfx.pfp_fw_version >= 2580 &&
1622 		    adev->gfx.mec_fw_version >= 2650 &&
1623 		    adev->mes.fw_version[0] >= 120) {
1624 			adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
1625 			adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
1626 		}
1627 		break;
1628 	case IP_VERSION(11, 0, 1):
1629 	case IP_VERSION(11, 0, 4):
1630 	case IP_VERSION(11, 5, 0):
1631 	case IP_VERSION(11, 5, 1):
1632 	case IP_VERSION(11, 5, 2):
1633 	case IP_VERSION(11, 5, 3):
1634 		/* add firmware version checks here */
1635 		if (0 && !adev->gfx.disable_uq) {
1636 			adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
1637 			adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
1638 		}
1639 		break;
1640 	default:
1641 		break;
1642 	}
1643 
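	/*
	 * Pick the cleaner shader for this IP version and enable it only
	 * when the CP/MES firmware is new enough to support it.
	 */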
1644 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1645 	case IP_VERSION(11, 0, 0):
1646 	case IP_VERSION(11, 0, 2):
1647 	case IP_VERSION(11, 0, 3):
1648 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1649 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1650 		if (adev->gfx.me_fw_version  >= 2280 &&
1651 		    adev->gfx.pfp_fw_version >= 2370 &&
1652 		    adev->gfx.mec_fw_version >= 2450  &&
1653 		    adev->mes.fw_version[0] >= 99) {
1654 			adev->gfx.enable_cleaner_shader = true;
1655 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1656 			if (r) {
1657 				adev->gfx.enable_cleaner_shader = false;
1658 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1659 			}
1660 		}
1661 		break;
1662 	case IP_VERSION(11, 0, 1):
1663 	case IP_VERSION(11, 0, 4):
1664 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1665 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1666 		if (adev->gfx.pfp_fw_version >= 102 &&
1667 		    adev->gfx.mec_fw_version >= 66 &&
1668 		    adev->mes.fw_version[0] >= 128) {
1669 			adev->gfx.enable_cleaner_shader = true;
1670 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1671 			if (r) {
1672 				adev->gfx.enable_cleaner_shader = false;
1673 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1674 			}
1675 		}
1676 		break;
1677 	case IP_VERSION(11, 5, 0):
1678 	case IP_VERSION(11, 5, 1):
1679 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1680 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1681 		if (adev->gfx.mec_fw_version >= 26 &&
1682 		    adev->mes.fw_version[0] >= 114) {
1683 			adev->gfx.enable_cleaner_shader = true;
1684 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1685 			if (r) {
1686 				adev->gfx.enable_cleaner_shader = false;
1687 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1688 			}
1689 		}
1690 		break;
1691 	case IP_VERSION(11, 5, 2):
1692 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1693 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1694 		if (adev->gfx.me_fw_version  >= 12 &&
1695 		    adev->gfx.pfp_fw_version >= 15 &&
1696 		    adev->gfx.mec_fw_version >= 15) {
1697 			adev->gfx.enable_cleaner_shader = true;
1698 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1699 			if (r) {
1700 				adev->gfx.enable_cleaner_shader = false;
1701 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1702 			}
1703 		}
1704 		break;
1705 	case IP_VERSION(11, 5, 3):
1706 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1707 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1708 		if (adev->gfx.me_fw_version  >= 7 &&
1709 		    adev->gfx.pfp_fw_version >= 8 &&
1710 		    adev->gfx.mec_fw_version >= 8) {
1711 			adev->gfx.enable_cleaner_shader = true;
1712 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1713 			if (r) {
1714 				adev->gfx.enable_cleaner_shader = false;
1715 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1716 			}
1717 		}
1718 		break;
1719 	default:
1720 		adev->gfx.enable_cleaner_shader = false;
1721 		break;
1722 	}
1723 
1724 	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
1725 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
1726 	    amdgpu_sriov_is_pp_one_vf(adev))
1727 		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
1728 
1729 	/* EOP Event */
1730 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1731 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1732 			      &adev->gfx.eop_irq);
1733 	if (r)
1734 		return r;
1735 
1736 	/* Bad opcode Event */
1737 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1738 			      GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
1739 			      &adev->gfx.bad_op_irq);
1740 	if (r)
1741 		return r;
1742 
1743 	/* Privileged reg */
1744 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1745 			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1746 			      &adev->gfx.priv_reg_irq);
1747 	if (r)
1748 		return r;
1749 
1750 	/* Privileged inst */
1751 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1752 			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1753 			      &adev->gfx.priv_inst_irq);
1754 	if (r)
1755 		return r;
1756 
1757 	/* FED error */
1758 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1759 				  GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
1760 				  &adev->gfx.rlc_gc_fed_irq);
1761 	if (r)
1762 		return r;
1763 
1764 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1765 
1766 	gfx_v11_0_me_init(adev);
1767 
1768 	r = gfx_v11_0_rlc_init(adev);
1769 	if (r) {
1770 		DRM_ERROR("Failed to init rlc BOs!\n");
1771 		return r;
1772 	}
1773 
1774 	r = gfx_v11_0_mec_init(adev);
1775 	if (r) {
1776 		DRM_ERROR("Failed to init MEC BOs!\n");
1777 		return r;
1778 	}
1779 
1780 	if (adev->gfx.num_gfx_rings) {
1781 		ring_id = 0;
1782 		/* set up the gfx ring */
1783 		for (i = 0; i < adev->gfx.me.num_me; i++) {
1784 			for (j = 0; j < num_queue_per_pipe; j++) {
1785 				for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1786 					if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1787 						continue;
1788 
1789 					r = gfx_v11_0_gfx_ring_init(adev, ring_id,
1790 								    i, k, j);
1791 					if (r)
1792 						return r;
1793 					ring_id++;
1794 				}
1795 			}
1796 		}
1797 	}
1798 
1799 	if (adev->gfx.num_compute_rings) {
1800 		ring_id = 0;
1801 		/* set up the compute queues - allocate horizontally across pipes */
1802 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1803 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1804 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1805 					if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
1806 									     k, j))
1807 						continue;
1808 
1809 					r = gfx_v11_0_compute_ring_init(adev, ring_id,
1810 									i, k, j);
1811 					if (r)
1812 						return r;
1813 
1814 					ring_id++;
1815 				}
1816 			}
1817 		}
1818 	}
1819 
1820 	adev->gfx.gfx_supported_reset =
1821 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
1822 	adev->gfx.compute_supported_reset =
1823 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1824 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1825 	case IP_VERSION(11, 0, 0):
1826 	case IP_VERSION(11, 0, 2):
1827 	case IP_VERSION(11, 0, 3):
1828 		if ((adev->gfx.me_fw_version >= 2280) &&
1829 		    (adev->gfx.mec_fw_version >= 2410) &&
1830 		    !amdgpu_sriov_vf(adev) &&
1831 		    !adev->debug_disable_gpu_ring_reset) {
1832 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1833 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1834 		}
1835 		break;
1836 	default:
1837 		if (!amdgpu_sriov_vf(adev) &&
1838 		    !adev->debug_disable_gpu_ring_reset) {
1839 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1840 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1841 		}
1842 		break;
1843 	}
1844 
1845 	if (!adev->enable_mes_kiq) {
1846 		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
1847 		if (r) {
1848 			DRM_ERROR("Failed to init KIQ BOs!\n");
1849 			return r;
1850 		}
1851 
1852 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1853 		if (r)
1854 			return r;
1855 	}
1856 
1857 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
1858 	if (r)
1859 		return r;
1860 
1861 	/* allocate visible FB for rlc auto-loading fw */
1862 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1863 		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1864 		if (r)
1865 			return r;
1866 	}
1867 
1868 	r = gfx_v11_0_gpu_early_init(adev);
1869 	if (r)
1870 		return r;
1871 
1872 	if (amdgpu_gfx_ras_sw_init(adev)) {
1873 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
1874 		return -EINVAL;
1875 	}
1876 
1877 	gfx_v11_0_alloc_ip_dump(adev);
1878 
1879 	r = amdgpu_gfx_sysfs_init(adev);
1880 	if (r)
1881 		return r;
1882 
1883 	return 0;
1884 }
1885 
1886 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1887 {
1888 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1889 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1890 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1891 
1892 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1893 			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1894 			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1895 }
1896 
1897 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1898 {
1899 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1900 			      &adev->gfx.me.me_fw_gpu_addr,
1901 			      (void **)&adev->gfx.me.me_fw_ptr);
1902 
1903 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1904 			       &adev->gfx.me.me_fw_data_gpu_addr,
1905 			       (void **)&adev->gfx.me.me_fw_data_ptr);
1906 }
1907 
1908 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1909 {
1910 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1911 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1912 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1913 }
1914 
1915 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
1916 {
1917 	int i;
1918 	struct amdgpu_device *adev = ip_block->adev;
1919 
1920 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1921 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1922 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1923 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1924 
1925 	amdgpu_gfx_mqd_sw_fini(adev, 0);
1926 
1927 	if (!adev->enable_mes_kiq) {
1928 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
1929 		amdgpu_gfx_kiq_fini(adev, 0);
1930 	}
1931 
1932 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1933 
1934 	gfx_v11_0_pfp_fini(adev);
1935 	gfx_v11_0_me_fini(adev);
1936 	gfx_v11_0_rlc_fini(adev);
1937 	gfx_v11_0_mec_fini(adev);
1938 
1939 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1940 		gfx_v11_0_rlc_autoload_buffer_fini(adev);
1941 
1942 	gfx_v11_0_free_microcode(adev);
1943 
1944 	amdgpu_gfx_sysfs_fini(adev);
1945 
1946 	kfree(adev->gfx.ip_dump_core);
1947 	kfree(adev->gfx.ip_dump_compute_queues);
1948 	kfree(adev->gfx.ip_dump_gfx_queues);
1949 
1950 	return 0;
1951 }
1952 
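/*
 * Steer register accesses to a given shader engine (SE), shader array
 * (SA) and instance through GRBM_GFX_INDEX; passing 0xffffffff selects
 * broadcast writes at that level.
 */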
1953 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1954 				   u32 sh_num, u32 instance, int xcc_id)
1955 {
1956 	u32 data;
1957 
1958 	if (instance == 0xffffffff)
1959 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1960 				     INSTANCE_BROADCAST_WRITES, 1);
1961 	else
1962 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1963 				     instance);
1964 
1965 	if (se_num == 0xffffffff)
1966 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1967 				     1);
1968 	else
1969 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1970 
1971 	if (sh_num == 0xffffffff)
1972 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1973 				     1);
1974 	else
1975 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1976 
1977 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1978 }
1979 
1980 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
1981 {
1982 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1983 
1984 	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
1985 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1986 					   CC_GC_SA_UNIT_DISABLE,
1987 					   SA_DISABLE);
1988 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
1989 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1990 						 GC_USER_SA_UNIT_DISABLE,
1991 						 SA_DISABLE);
1992 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1993 					    adev->gfx.config.max_shader_engines);
1994 
1995 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1996 }
1997 
1998 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1999 {
2000 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
2001 	u32 rb_mask;
2002 
2003 	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
2004 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
2005 					    CC_RB_BACKEND_DISABLE,
2006 					    BACKEND_DISABLE);
2007 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
2008 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
2009 						 GC_USER_RB_BACKEND_DISABLE,
2010 						 BACKEND_DISABLE);
2011 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
2012 					    adev->gfx.config.max_shader_engines);
2013 
2014 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
2015 }
2016 
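/*
 * Derive the active render backend (RB) bitmap from the active shader
 * array (SA) bitmap: each SA owns a contiguous slice of RB bits. As an
 * illustration, with 4 RBs per SE and 2 SAs per SE, each active SA
 * contributes a 2-bit mask at its slot; the result is then ANDed with
 * the globally active RBs to drop harvested backends.
 */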
2017 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
2018 {
2019 	u32 rb_bitmap_per_sa;
2020 	u32 rb_bitmap_width_per_sa;
2021 	u32 max_sa;
2022 	u32 active_sa_bitmap;
2023 	u32 global_active_rb_bitmap;
2024 	u32 active_rb_bitmap = 0;
2025 	u32 i;
2026 
2027 	/* query sa bitmap from SA_UNIT_DISABLE registers */
2028 	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
2029 	/* query rb bitmap from RB_BACKEND_DISABLE registers */
2030 	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
2031 
2032 	/* generate active rb bitmap according to active sa bitmap */
2033 	max_sa = adev->gfx.config.max_shader_engines *
2034 		 adev->gfx.config.max_sh_per_se;
2035 	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
2036 				 adev->gfx.config.max_sh_per_se;
2037 	rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
2038 
2039 	for (i = 0; i < max_sa; i++) {
2040 		if (active_sa_bitmap & (1 << i))
2041 			active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
2042 	}
2043 
2044 	active_rb_bitmap &= global_active_rb_bitmap;
2045 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
2046 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
2047 }
2048 
2049 #define DEFAULT_SH_MEM_BASES	(0x6000)
2050 #define LDS_APP_BASE           0x1
2051 #define SCRATCH_APP_BASE       0x2
2052 
2053 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
2054 {
2055 	int i;
2056 	uint32_t sh_mem_bases;
2057 	uint32_t data;
2058 
2059 	/*
2060 	 * Configure apertures:
2061 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2062 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2063 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2064 	 */
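	/* LDS selector goes into the SHARED_BASE field, the scratch
	 * selector into the low-order PRIVATE_BASE field.
	 */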
2065 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
2066 			SCRATCH_APP_BASE;
2067 
2068 	mutex_lock(&adev->srbm_mutex);
2069 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2070 		soc21_grbm_select(adev, 0, 0, 0, i);
2071 		/* CP and shaders */
2072 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2073 		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
2074 
2075 		/* Enable trap for each kfd vmid. */
2076 		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
2077 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
2078 		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
2079 	}
2080 	soc21_grbm_select(adev, 0, 0, 0, 0);
2081 	mutex_unlock(&adev->srbm_mutex);
2082 
2083 	/*
2084 	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
2085 	 * access. These should be enabled by FW for target VMIDs.
2086 	 */
2087 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2088 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
2089 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
2090 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
2091 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
2092 	}
2093 }
2094 
2095 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
2096 {
2097 	int vmid;
2098 
2099 	/*
2100 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2101 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2102 	 * the driver can enable them for graphics. VMID0 should maintain
2103 	 * access so that HWS firmware can save/restore entries.
2104 	 */
2105 	for (vmid = 1; vmid < 16; vmid++) {
2106 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
2107 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
2108 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
2109 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
2110 	}
2111 }
2112 
2113 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
2114 {
2115 	/* TODO: harvest feature to be added later. */
2116 }
2117 
2118 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
2119 {
2120 	/* TCCs are global (not instanced). */
2121 	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
2122 			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
2123 
2124 	adev->gfx.config.tcc_disabled_mask =
2125 		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
2126 		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
2127 }
2128 
2129 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
2130 {
2131 	u32 tmp;
2132 	int i;
2133 
2134 	if (!amdgpu_sriov_vf(adev))
2135 		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2136 
2137 	gfx_v11_0_setup_rb(adev);
2138 	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
2139 	gfx_v11_0_get_tcc_info(adev);
2140 	adev->gfx.config.pa_sc_tile_steering_override = 0;
2141 
2142 	/* Set whether texture coordinate truncation is conformant. */
2143 	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
2144 	adev->gfx.config.ta_cntl2_truncate_coord_mode =
2145 		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
2146 
2147 	/* XXX SH_MEM regs */
2148 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2149 	mutex_lock(&adev->srbm_mutex);
2150 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2151 		soc21_grbm_select(adev, 0, 0, 0, i);
2152 		/* CP and shaders */
2153 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2154 		if (i != 0) {
2155 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2156 				(adev->gmc.private_aperture_start >> 48));
2157 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2158 				(adev->gmc.shared_aperture_start >> 48));
2159 			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
2160 		}
2161 	}
2162 	soc21_grbm_select(adev, 0, 0, 0, 0);
2163 
2164 	mutex_unlock(&adev->srbm_mutex);
2165 
2166 	gfx_v11_0_init_compute_vmid(adev);
2167 	gfx_v11_0_init_gds_vmid(adev);
2168 }
2169 
2170 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
2171 				      int me, int pipe)
2172 {
2173 	if (me != 0)
2174 		return 0;
2175 
2176 	switch (pipe) {
2177 	case 0:
2178 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
2179 	case 1:
2180 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
2181 	default:
2182 		return 0;
2183 	}
2184 }
2185 
2186 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
2187 				      int me, int pipe)
2188 {
2189 	/*
2190 	 * amdgpu controls only the first MEC. That's why this function only
2191 	 * handles the setting of interrupts for this specific MEC. All other
2192 	 * pipes' interrupts are set by amdkfd.
2193 	 */
2194 	if (me != 1)
2195 		return 0;
2196 
2197 	switch (pipe) {
2198 	case 0:
2199 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
2200 	case 1:
2201 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
2202 	case 2:
2203 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
2204 	case 3:
2205 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
2206 	default:
2207 		return 0;
2208 	}
2209 }
2210 
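/*
 * Toggle the context-busy/empty, compute-busy and gfx-idle interrupt
 * enables on every gfx pipe's CP interrupt control register; under
 * SR-IOV these registers are not touched by the guest.
 */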
2211 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2212 					       bool enable)
2213 {
2214 	u32 tmp, cp_int_cntl_reg;
2215 	int i, j;
2216 
2217 	if (amdgpu_sriov_vf(adev))
2218 		return;
2219 
2220 	for (i = 0; i < adev->gfx.me.num_me; i++) {
2221 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
2222 			cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
2223 
2224 			if (cp_int_cntl_reg) {
2225 				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
2226 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
2227 						    enable ? 1 : 0);
2228 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
2229 						    enable ? 1 : 0);
2230 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
2231 						    enable ? 1 : 0);
2232 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
2233 						    enable ? 1 : 0);
2234 				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
2235 			}
2236 		}
2237 	}
2238 }
2239 
2240 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
2241 {
2242 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2243 
2244 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
2245 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2246 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
2247 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2248 	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
2249 
2250 	return 0;
2251 }
2252 
2253 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
2254 {
2255 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
2256 
2257 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2258 	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
2259 }
2260 
2261 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
2262 {
2263 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2264 	udelay(50);
2265 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2266 	udelay(50);
2267 }
2268 
2269 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
2270 					     bool enable)
2271 {
2272 	uint32_t rlc_pg_cntl;
2273 
2274 	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
2275 
2276 	if (!enable) {
2277 		/* RLC_PG_CNTL[23] = 0 (default)
2278 		 * RLC will wait for handshake acks with SMU
2279 		 * GFXOFF will be enabled
2280 		 * RLC_PG_CNTL[23] = 1
2281 		 * RLC will not issue any message to SMU
2282 		 * hence no handshake between SMU & RLC
2283 		 * GFXOFF will be disabled
2284 		 */
2285 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2286 	} else {
2287 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	}
2288 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
2289 }
2290 
2291 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
2292 {
2293 	/* TODO: re-enable the RLC/SMU handshake once SMU and the
2294 	 * GFXOFF feature work as expected */
2295 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
2296 		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
2297 
2298 	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2299 	udelay(50);
2300 }
2301 
2302 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
2303 {
2304 	uint32_t tmp;
2305 
2306 	/* enable Save Restore Machine */
2307 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
2308 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2309 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
2310 	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
2311 }
2312 
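/*
 * Direct (legacy) RLC load: write the start offset to the UCODE_ADDR
 * register, stream the image dword by dword through UCODE_DATA (the
 * address presumably auto-increments), then write the firmware version
 * to UCODE_ADDR as the final step. The v2_2/v2_3 loaders below follow
 * the same pattern for the IRAM/DRAM and RLCP/RLCV images.
 */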
2313 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
2314 {
2315 	const struct rlc_firmware_header_v2_0 *hdr;
2316 	const __le32 *fw_data;
2317 	unsigned i, fw_size;
2318 
2319 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2320 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2321 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2322 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2323 
2324 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
2325 		     RLCG_UCODE_LOADING_START_ADDRESS);
2326 
2327 	for (i = 0; i < fw_size; i++)
2328 		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
2329 			     le32_to_cpup(fw_data++));
2330 
2331 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2332 }
2333 
2334 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2335 {
2336 	const struct rlc_firmware_header_v2_2 *hdr;
2337 	const __le32 *fw_data;
2338 	unsigned i, fw_size;
2339 	u32 tmp;
2340 
2341 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2342 
2343 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2344 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2345 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2346 
2347 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2348 
2349 	for (i = 0; i < fw_size; i++) {
2350 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2351 			msleep(1);
2352 		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2353 				le32_to_cpup(fw_data++));
2354 	}
2355 
2356 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2357 
2358 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2359 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2360 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2361 
2362 	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2363 	for (i = 0; i < fw_size; i++) {
2364 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2365 			msleep(1);
2366 		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2367 				le32_to_cpup(fw_data++));
2368 	}
2369 
2370 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2371 
2372 	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2373 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2374 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2375 	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2376 }
2377 
2378 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
2379 {
2380 	const struct rlc_firmware_header_v2_3 *hdr;
2381 	const __le32 *fw_data;
2382 	unsigned i, fw_size;
2383 	u32 tmp;
2384 
2385 	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
2386 
2387 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2388 			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
2389 	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
2390 
2391 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
2392 
2393 	for (i = 0; i < fw_size; i++) {
2394 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2395 			msleep(1);
2396 		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
2397 				le32_to_cpup(fw_data++));
2398 	}
2399 
2400 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
2401 
2402 	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
2403 	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
2404 	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
2405 
2406 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2407 			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
2408 	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
2409 
2410 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
2411 
2412 	for (i = 0; i < fw_size; i++) {
2413 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2414 			msleep(1);
2415 		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
2416 				le32_to_cpup(fw_data++));
2417 	}
2418 
2419 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
2420 
2421 	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
2422 	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
2423 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
2424 }
2425 
2426 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
2427 {
2428 	const struct rlc_firmware_header_v2_0 *hdr;
2429 	uint16_t version_major;
2430 	uint16_t version_minor;
2431 
2432 	if (!adev->gfx.rlc_fw)
2433 		return -EINVAL;
2434 
2435 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2436 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2437 
2438 	version_major = le16_to_cpu(hdr->header.header_version_major);
2439 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
2440 
2441 	if (version_major == 2) {
2442 		gfx_v11_0_load_rlcg_microcode(adev);
2443 		if (amdgpu_dpm == 1) {
2444 			if (version_minor >= 2)
2445 				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
2446 			if (version_minor == 3)
2447 				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2448 		}
2449 
2450 		return 0;
2451 	}
2452 
2453 	return -EINVAL;
2454 }
2455 
2456 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2457 {
2458 	int r;
2459 
2460 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2461 		gfx_v11_0_init_csb(adev);
2462 
2463 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2464 			gfx_v11_0_rlc_enable_srm(adev);
2465 	} else {
2466 		if (amdgpu_sriov_vf(adev)) {
2467 			gfx_v11_0_init_csb(adev);
2468 			return 0;
2469 		}
2470 
2471 		adev->gfx.rlc.funcs->stop(adev);
2472 
2473 		/* disable CG */
2474 		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2475 
2476 		/* disable PG */
2477 		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2478 
2479 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2480 			/* legacy rlc firmware loading */
2481 			r = gfx_v11_0_rlc_load_microcode(adev);
2482 			if (r)
2483 				return r;
2484 		}
2485 
2486 		gfx_v11_0_init_csb(adev);
2487 
2488 		adev->gfx.rlc.funcs->start(adev);
2489 	}
2490 	return 0;
2491 }
2492 
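/*
 * The config_*_cache helpers below share one pattern: trigger an L1
 * instruction cache invalidation, poll for completion, flush HDP when
 * running on the emulator, then program the cache control bits and the
 * 4K-aligned ucode base address.
 */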
2493 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2494 {
2495 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2496 	uint32_t tmp;
2497 	int i;
2498 
2499 	/* Trigger an invalidation of the L1 instruction caches */
2500 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2501 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2502 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2503 
2504 	/* Wait for invalidation complete */
2505 	for (i = 0; i < usec_timeout; i++) {
2506 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2507 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2508 					INVALIDATE_CACHE_COMPLETE))
2509 			break;
2510 		udelay(1);
2511 	}
2512 
2513 	if (i >= usec_timeout) {
2514 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2515 		return -EINVAL;
2516 	}
2517 
2518 	if (amdgpu_emu_mode == 1)
2519 		amdgpu_device_flush_hdp(adev, NULL);
2520 
2521 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2522 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2523 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2524 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2525 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2526 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2527 
2528 	/* Program me ucode address into instruction cache address register */
2529 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2530 			lower_32_bits(addr) & 0xFFFFF000);
2531 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2532 			upper_32_bits(addr));
2533 
2534 	return 0;
2535 }
2536 
2537 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2538 {
2539 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2540 	uint32_t tmp;
2541 	int i;
2542 
2543 	/* Trigger an invalidation of the L1 instruction caches */
2544 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2545 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2546 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2547 
2548 	/* Wait for invalidation complete */
2549 	for (i = 0; i < usec_timeout; i++) {
2550 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2551 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2552 					INVALIDATE_CACHE_COMPLETE))
2553 			break;
2554 		udelay(1);
2555 	}
2556 
2557 	if (i >= usec_timeout) {
2558 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2559 		return -EINVAL;
2560 	}
2561 
2562 	if (amdgpu_emu_mode == 1)
2563 		amdgpu_device_flush_hdp(adev, NULL);
2564 
2565 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2566 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2567 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2568 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2569 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2570 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2571 
2572 	/* Program pfp ucode address into instruction cache address register */
2573 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2574 			lower_32_bits(addr) & 0xFFFFF000);
2575 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2576 			upper_32_bits(addr));
2577 
2578 	return 0;
2579 }
2580 
2581 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2582 {
2583 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2584 	uint32_t tmp;
2585 	int i;
2586 
2587 	/* Trigger an invalidation of the L1 instruction caches */
2588 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2589 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2590 
2591 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2592 
2593 	/* Wait for invalidation complete */
2594 	for (i = 0; i < usec_timeout; i++) {
2595 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2596 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2597 					INVALIDATE_CACHE_COMPLETE))
2598 			break;
2599 		udelay(1);
2600 	}
2601 
2602 	if (i >= usec_timeout) {
2603 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2604 		return -EINVAL;
2605 	}
2606 
2607 	if (amdgpu_emu_mode == 1)
2608 		amdgpu_device_flush_hdp(adev, NULL);
2609 
2610 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2611 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2612 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2613 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2614 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2615 
2616 	/* Program mec1 ucode address into instruction cache address register */
2617 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2618 			lower_32_bits(addr) & 0xFFFFF000);
2619 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2620 			upper_32_bits(addr));
2621 
2622 	return 0;
2623 }
2624 
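/*
 * RS64 variant: in addition to the instruction cache base, each pipe
 * gets its own program-counter start address and data cache base, and
 * the pipe is pulsed through reset so the new start address is latched.
 */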
2625 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2626 {
2627 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2628 	uint32_t tmp;
2629 	unsigned i, pipe_id;
2630 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2631 
2632 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2633 		adev->gfx.pfp_fw->data;
2634 
2635 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2636 		lower_32_bits(addr));
2637 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2638 		upper_32_bits(addr));
2639 
2640 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2641 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2642 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2643 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2644 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2645 
2646 	/*
2647 	 * Programming any of the CP_PFP_IC_BASE registers
2648 	 * forces invalidation of the PFP L1 I$. Wait for the
2649 	 * invalidation complete
2650 	 */
2651 	for (i = 0; i < usec_timeout; i++) {
2652 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2653 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2654 			INVALIDATE_CACHE_COMPLETE))
2655 			break;
2656 		udelay(1);
2657 	}
2658 
2659 	if (i >= usec_timeout) {
2660 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2661 		return -EINVAL;
2662 	}
2663 
2664 	/* Prime the L1 instruction caches */
2665 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2666 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2667 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2668 	/* Wait for the cache to be primed */
2669 	for (i = 0; i < usec_timeout; i++) {
2670 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2671 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2672 			ICACHE_PRIMED))
2673 			break;
2674 		udelay(1);
2675 	}
2676 
2677 	if (i >= usec_timeout) {
2678 		dev_err(adev->dev, "failed to prime instruction cache\n");
2679 		return -EINVAL;
2680 	}
2681 
2682 	mutex_lock(&adev->srbm_mutex);
2683 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2684 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2685 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2686 			(pfp_hdr->ucode_start_addr_hi << 30) |
2687 			(pfp_hdr->ucode_start_addr_lo >> 2));
2688 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2689 			pfp_hdr->ucode_start_addr_hi >> 2);
2690 
2691 		/*
2692 		 * Program CP_ME_CNTL to reset given PIPE to take
2693 		 * effect of CP_PFP_PRGRM_CNTR_START.
2694 		 */
2695 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2696 		if (pipe_id == 0)
2697 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2698 					PFP_PIPE0_RESET, 1);
2699 		else
2700 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2701 					PFP_PIPE1_RESET, 1);
2702 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2703 
2704 		/* Clear the pfp pipe reset bit. */
2705 		if (pipe_id == 0)
2706 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2707 					PFP_PIPE0_RESET, 0);
2708 		else
2709 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2710 					PFP_PIPE1_RESET, 0);
2711 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2712 
2713 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2714 			lower_32_bits(addr2));
2715 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2716 			upper_32_bits(addr2));
2717 	}
2718 	soc21_grbm_select(adev, 0, 0, 0, 0);
2719 	mutex_unlock(&adev->srbm_mutex);
2720 
2721 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2722 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2723 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2724 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2725 
2726 	/* Invalidate the data caches */
2727 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2728 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2729 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2730 
2731 	for (i = 0; i < usec_timeout; i++) {
2732 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2733 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2734 			INVALIDATE_DCACHE_COMPLETE))
2735 			break;
2736 		udelay(1);
2737 	}
2738 
2739 	if (i >= usec_timeout) {
2740 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2741 		return -EINVAL;
2742 	}
2743 
2744 	return 0;
2745 }
2746 
2747 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2748 {
2749 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2750 	uint32_t tmp;
2751 	unsigned i, pipe_id;
2752 	const struct gfx_firmware_header_v2_0 *me_hdr;
2753 
2754 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2755 		adev->gfx.me_fw->data;
2756 
2757 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2758 		lower_32_bits(addr));
2759 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2760 		upper_32_bits(addr));
2761 
2762 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2763 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2764 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2765 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2766 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2767 
2768 	/*
2769 	 * Programming any of the CP_ME_IC_BASE registers
2770 	 * forces invalidation of the ME L1 I$. Wait for the
2771 	 * invalidation complete
2772 	 */
2773 	for (i = 0; i < usec_timeout; i++) {
2774 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2775 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2776 			INVALIDATE_CACHE_COMPLETE))
2777 			break;
2778 		udelay(1);
2779 	}
2780 
2781 	if (i >= usec_timeout) {
2782 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2783 		return -EINVAL;
2784 	}
2785 
2786 	/* Prime the instruction caches */
2787 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2788 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2789 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2790 
2791 	/* Wait for the instruction cache to be primed */
2792 	for (i = 0; i < usec_timeout; i++) {
2793 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2794 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2795 			ICACHE_PRIMED))
2796 			break;
2797 		udelay(1);
2798 	}
2799 
2800 	if (i >= usec_timeout) {
2801 		dev_err(adev->dev, "failed to prime instruction cache\n");
2802 		return -EINVAL;
2803 	}
2804 
2805 	mutex_lock(&adev->srbm_mutex);
2806 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2807 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2808 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2809 			(me_hdr->ucode_start_addr_hi << 30) |
2810 			(me_hdr->ucode_start_addr_lo >> 2));
2811 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2812 			me_hdr->ucode_start_addr_hi >> 2);
2813 
2814 		/*
2815 		 * Program CP_ME_CNTL to reset given PIPE to take
2816 		 * effect of CP_ME_PRGRM_CNTR_START.
2817 		 */
2818 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2819 		if (pipe_id == 0)
2820 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2821 					ME_PIPE0_RESET, 1);
2822 		else
2823 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2824 					ME_PIPE1_RESET, 1);
2825 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2826 
2827 		/* Clear the me pipe reset bit. */
2828 		if (pipe_id == 0)
2829 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2830 					ME_PIPE0_RESET, 0);
2831 		else
2832 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2833 					ME_PIPE1_RESET, 0);
2834 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2835 
2836 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2837 			lower_32_bits(addr2));
2838 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2839 			upper_32_bits(addr2));
2840 	}
2841 	soc21_grbm_select(adev, 0, 0, 0, 0);
2842 	mutex_unlock(&adev->srbm_mutex);
2843 
2844 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2845 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2846 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2847 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2848 
2849 	/* Invalidate the data caches */
2850 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2851 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2852 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2853 
2854 	for (i = 0; i < usec_timeout; i++) {
2855 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2856 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2857 			INVALIDATE_DCACHE_COMPLETE))
2858 			break;
2859 		udelay(1);
2860 	}
2861 
2862 	if (i >= usec_timeout) {
2863 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2864 		return -EINVAL;
2865 	}
2866 
2867 	return 0;
2868 }
2869 
2870 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2871 {
2872 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2873 	uint32_t tmp;
2874 	unsigned i;
2875 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2876 
2877 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2878 		adev->gfx.mec_fw->data;
2879 
2880 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2881 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2882 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2883 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2884 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2885 
2886 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2887 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2888 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2889 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2890 
2891 	mutex_lock(&adev->srbm_mutex);
2892 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2893 		soc21_grbm_select(adev, 1, i, 0, 0);
2894 
2895 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, lower_32_bits(addr2));
2896 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2897 		     upper_32_bits(addr2));
2898 
2899 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2900 					mec_hdr->ucode_start_addr_lo >> 2 |
2901 					mec_hdr->ucode_start_addr_hi << 30);
2902 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2903 					mec_hdr->ucode_start_addr_hi >> 2);
2904 
2905 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, lower_32_bits(addr));
2906 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2907 		     upper_32_bits(addr));
2908 	}
2909 	soc21_grbm_select(adev, 0, 0, 0, 0);
2910 	mutex_unlock(&adev->srbm_mutex);
2911 
2912 	/* Trigger an invalidation of the MEC data caches */
2913 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2914 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2915 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2916 
2917 	/* Wait for invalidation complete */
2918 	for (i = 0; i < usec_timeout; i++) {
2919 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2920 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2921 				       INVALIDATE_DCACHE_COMPLETE))
2922 			break;
2923 		udelay(1);
2924 	}
2925 
2926 	if (i >= usec_timeout) {
2927 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2928 		return -EINVAL;
2929 	}
2930 
2931 	/* Trigger an invalidation of the L1 instruction caches */
2932 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2933 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2934 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2935 
2936 	/* Wait for invalidation complete */
2937 	for (i = 0; i < usec_timeout; i++) {
2938 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2939 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2940 				       INVALIDATE_CACHE_COMPLETE))
2941 			break;
2942 		udelay(1);
2943 	}
2944 
2945 	if (i >= usec_timeout) {
2946 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2947 		return -EINVAL;
2948 	}
2949 
2950 	return 0;
2951 }
2952 
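/*
 * Program the RS64 program-counter start addresses for PFP, ME and MEC
 * on every pipe, pulsing each pipe's reset bit so the CP restarts from
 * the new address.
 */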
2953 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2954 {
2955 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2956 	const struct gfx_firmware_header_v2_0 *me_hdr;
2957 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2958 	uint32_t pipe_id, tmp;
2959 
2960 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2961 		adev->gfx.mec_fw->data;
2962 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2963 		adev->gfx.me_fw->data;
2964 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2965 		adev->gfx.pfp_fw->data;
2966 
2967 	/* config pfp program start addr */
2968 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2969 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2970 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2971 			(pfp_hdr->ucode_start_addr_hi << 30) |
2972 			(pfp_hdr->ucode_start_addr_lo >> 2));
2973 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2974 			pfp_hdr->ucode_start_addr_hi >> 2);
2975 	}
2976 	soc21_grbm_select(adev, 0, 0, 0, 0);
2977 
2978 	/* reset pfp pipe */
2979 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2980 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2981 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2982 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2983 
2984 	/* clear pfp pipe reset */
2985 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2986 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2987 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2988 
2989 	/* config me program start addr */
2990 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2991 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2992 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2993 			(me_hdr->ucode_start_addr_hi << 30) |
2994 			(me_hdr->ucode_start_addr_lo >> 2));
2995 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2996 			me_hdr->ucode_start_addr_hi >> 2);
2997 	}
2998 	soc21_grbm_select(adev, 0, 0, 0, 0);
2999 
3000 	/* reset me pipe */
3001 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3002 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
3003 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
3004 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3005 
3006 	/* clear me pipe reset */
3007 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
3008 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
3009 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3010 
3011 	/* config mec program start addr */
3012 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
3013 		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
3014 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3015 					(mec_hdr->ucode_start_addr_lo >> 2) |
3016 					(mec_hdr->ucode_start_addr_hi << 30));
3017 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3018 					mec_hdr->ucode_start_addr_hi >> 2);
3019 	}
3020 	soc21_grbm_select(adev, 0, 0, 0, 0);
3021 
3022 	/* reset mec pipe */
3023 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3024 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
3025 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
3026 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
3027 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
3028 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
3029 
3030 	/* clear mec pipe reset */
3031 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
3032 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
3033 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
3034 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
3035 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
3036 }
3037 
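/*
 * Poll until the RLC reports that the GC firmware has been loaded
 * (CP_STAT idle and BOOTLOAD_COMPLETE set), then, for the backdoor
 * autoload path, point the CP instruction/data caches at the firmware
 * images that live inside the RLC autoload buffer.
 */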
3038 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
3039 {
3040 	uint32_t cp_status;
3041 	uint32_t bootload_status;
3042 	int i, r;
3043 	uint64_t addr, addr2;
3044 
3045 	for (i = 0; i < adev->usec_timeout; i++) {
3046 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
3047 
3048 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3049 			    IP_VERSION(11, 0, 1) ||
3050 		    amdgpu_ip_version(adev, GC_HWIP, 0) ==
3051 			    IP_VERSION(11, 0, 4) ||
3052 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
3053 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
3054 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
3055 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
3056 			bootload_status = RREG32_SOC15(GC, 0,
3057 					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
3058 		else
3059 			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
3060 
3061 		if ((cp_status == 0) &&
3062 		    (REG_GET_FIELD(bootload_status,
3063 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
3064 			break;
3065 		}
3066 		udelay(1);
3067 	}
3068 
3069 	if (i >= adev->usec_timeout) {
3070 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
3071 		return -ETIMEDOUT;
3072 	}
3073 
3074 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
3075 		if (adev->gfx.rs64_enable) {
3076 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3077 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
3078 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3079 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
3080 			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
3081 			if (r)
3082 				return r;
3083 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3084 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
3085 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3086 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
3087 			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
3088 			if (r)
3089 				return r;
3090 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3091 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
3092 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3093 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
3094 			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
3095 			if (r)
3096 				return r;
3097 		} else {
3098 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3099 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
3100 			r = gfx_v11_0_config_me_cache(adev, addr);
3101 			if (r)
3102 				return r;
3103 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3104 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
3105 			r = gfx_v11_0_config_pfp_cache(adev, addr);
3106 			if (r)
3107 				return r;
3108 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3109 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
3110 			r = gfx_v11_0_config_mec_cache(adev, addr);
3111 			if (r)
3112 				return r;
3113 		}
3114 	}
3115 
3116 	return 0;
3117 }
3118 
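/*
 * Halt or unhalt the gfx CP micro engines (PFP and ME) and wait for
 * CP_STAT to go idle.  A timeout is only logged; the function always
 * returns 0.
 */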
3119 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3120 {
3121 	int i;
3122 	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3123 
3124 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3125 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3126 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3127 
3128 	for (i = 0; i < adev->usec_timeout; i++) {
3129 		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
3130 			break;
3131 		udelay(1);
3132 	}
3133 
3134 	if (i >= adev->usec_timeout)
3135 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
3136 
3137 	return 0;
3138 }
3139 
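/*
 * Legacy (non-RS64) PFP microcode load: copy the ucode image into a
 * GTT buffer object backing the PFP instruction cache, then write the
 * jump table through the CP_HYP_PFP_UCODE_ADDR/DATA pair.
 */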
3140 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
3141 {
3142 	int r;
3143 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3144 	const __le32 *fw_data;
3145 	unsigned i, fw_size;
3146 
3147 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3148 		adev->gfx.pfp_fw->data;
3149 
3150 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3151 
3152 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3153 		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3154 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
3155 
3156 	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
3157 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3158 				      &adev->gfx.pfp.pfp_fw_obj,
3159 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3160 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3161 	if (r) {
3162 		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
3163 		gfx_v11_0_pfp_fini(adev);
3164 		return r;
3165 	}
3166 
3167 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
3168 
3169 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3170 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3171 
3172 	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
3173 
3174 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
3175 
3176 	for (i = 0; i < pfp_hdr->jt_size; i++)
3177 		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
3178 			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
3179 
3180 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3181 
3182 	return 0;
3183 }
3184 
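/*
 * RS64 PFP microcode load: RS64 firmware carries separate instruction
 * and data images, each copied into its own 64KB-aligned buffer
 * object.  The instruction cache is invalidated and primed, each PFP
 * pipe gets its start address programmed with a reset pulse, and the
 * shared RS64 data cache is pointed at the data image (DC_BASE0).
 */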
3185 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
3186 {
3187 	int r;
3188 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
3189 	const __le32 *fw_ucode, *fw_data;
3190 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3191 	uint32_t tmp;
3192 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3193 
3194 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
3195 		adev->gfx.pfp_fw->data;
3196 
3197 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3198 
3199 	/* instruction */
3200 	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
3201 		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
3202 	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
3203 	/* data */
3204 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3205 		le32_to_cpu(pfp_hdr->data_offset_bytes));
3206 	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
3207 
3208 	/* 64kb align */
3209 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3210 				      64 * 1024,
3211 				      AMDGPU_GEM_DOMAIN_VRAM |
3212 				      AMDGPU_GEM_DOMAIN_GTT,
3213 				      &adev->gfx.pfp.pfp_fw_obj,
3214 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3215 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3216 	if (r) {
3217 		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
3218 		gfx_v11_0_pfp_fini(adev);
3219 		return r;
3220 	}
3221 
3222 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3223 				      64 * 1024,
3224 				      AMDGPU_GEM_DOMAIN_VRAM |
3225 				      AMDGPU_GEM_DOMAIN_GTT,
3226 				      &adev->gfx.pfp.pfp_fw_data_obj,
3227 				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
3228 				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
3229 	if (r) {
3230 		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
3231 		gfx_v11_0_pfp_fini(adev);
3232 		return r;
3233 	}
3234 
3235 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
3236 	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
3237 
3238 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3239 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
3240 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3241 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
3242 
3243 	if (amdgpu_emu_mode == 1)
3244 		amdgpu_device_flush_hdp(adev, NULL);
3245 
3246 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
3247 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3248 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
3249 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3250 
3251 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
3252 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
3253 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
3254 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
3255 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
3256 
3257 	/*
3258 	 * Programming any of the CP_PFP_IC_BASE registers
3259 	 * forces invalidation of the PFP L1 I$. Wait for the
3260 	 * invalidation to complete.
3261 	 */
3262 	for (i = 0; i < usec_timeout; i++) {
3263 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3264 		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3265 				  INVALIDATE_CACHE_COMPLETE) == 1)
3266 			break;
3267 		udelay(1);
3268 	}
3269 
3270 	if (i >= usec_timeout) {
3271 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3272 		return -EINVAL;
3273 	}
3274 
3275 	/* Prime the L1 instruction caches */
3276 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3277 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
3278 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
3279 	/* Wait for the cache to be primed */
3280 	for (i = 0; i < usec_timeout; i++) {
3281 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3282 		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3283 				  ICACHE_PRIMED) == 1)
3284 			break;
3285 		udelay(1);
3286 	}
3287 
3288 	if (i >= usec_timeout) {
3289 		dev_err(adev->dev, "failed to prime instruction cache\n");
3290 		return -EINVAL;
3291 	}
3292 
3293 	mutex_lock(&adev->srbm_mutex);
3294 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3295 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3296 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
3297 			(pfp_hdr->ucode_start_addr_hi << 30) |
3298 			(pfp_hdr->ucode_start_addr_lo >> 2));
3299 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
3300 			pfp_hdr->ucode_start_addr_hi >> 2);
3301 
3302 		/*
3303 		 * Program CP_ME_CNTL to reset the given pipe so that
3304 		 * CP_PFP_PRGRM_CNTR_START takes effect.
3305 		 */
3306 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3307 		if (pipe_id == 0)
3308 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3309 					PFP_PIPE0_RESET, 1);
3310 		else
3311 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3312 					PFP_PIPE1_RESET, 1);
3313 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3314 
3315 		/* Clear the pfp pipe reset bit. */
3316 		if (pipe_id == 0)
3317 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3318 					PFP_PIPE0_RESET, 0);
3319 		else
3320 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3321 					PFP_PIPE1_RESET, 0);
3322 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3323 
3324 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
3325 			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3326 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
3327 			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3328 	}
3329 	soc21_grbm_select(adev, 0, 0, 0, 0);
3330 	mutex_unlock(&adev->srbm_mutex);
3331 
3332 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3333 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3334 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3335 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3336 
3337 	/* Invalidate the data caches */
3338 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3339 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3340 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3341 
3342 	for (i = 0; i < usec_timeout; i++) {
3343 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3344 		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3345 				  INVALIDATE_DCACHE_COMPLETE) == 1)
3346 			break;
3347 		udelay(1);
3348 	}
3349 
3350 	if (i >= usec_timeout) {
3351 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3352 		return -EINVAL;
3353 	}
3354 
3355 	return 0;
3356 }
3357 
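/* Legacy (non-RS64) ME microcode load; mirrors the PFP path above. */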
3358 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
3359 {
3360 	int r;
3361 	const struct gfx_firmware_header_v1_0 *me_hdr;
3362 	const __le32 *fw_data;
3363 	unsigned i, fw_size;
3364 
3365 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3366 		adev->gfx.me_fw->data;
3367 
3368 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3369 
3370 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3371 		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3372 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
3373 
3374 	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
3375 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3376 				      &adev->gfx.me.me_fw_obj,
3377 				      &adev->gfx.me.me_fw_gpu_addr,
3378 				      (void **)&adev->gfx.me.me_fw_ptr);
3379 	if (r) {
3380 		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
3381 		gfx_v11_0_me_fini(adev);
3382 		return r;
3383 	}
3384 
3385 	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
3386 
3387 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3388 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3389 
3390 	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
3391 
3392 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
3393 
3394 	for (i = 0; i < me_hdr->jt_size; i++)
3395 		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
3396 			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));
3397 
3398 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
3399 
3400 	return 0;
3401 }
3402 
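/*
 * RS64 ME microcode load: same sequence as the PFP RS64 path, but
 * through the CP_ME_IC_* registers and with the data image mapped at
 * RS64 DC_BASE1.
 */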
3403 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
3404 {
3405 	int r;
3406 	const struct gfx_firmware_header_v2_0 *me_hdr;
3407 	const __le32 *fw_ucode, *fw_data;
3408 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3409 	uint32_t tmp;
3410 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3411 
3412 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
3413 		adev->gfx.me_fw->data;
3414 
3415 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3416 
3417 	/* instruction */
3418 	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
3419 		le32_to_cpu(me_hdr->ucode_offset_bytes));
3420 	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
3421 	/* data */
3422 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3423 		le32_to_cpu(me_hdr->data_offset_bytes));
3424 	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
3425 
3426 	/* 64kb align */
3427 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3428 				      64 * 1024,
3429 				      AMDGPU_GEM_DOMAIN_VRAM |
3430 				      AMDGPU_GEM_DOMAIN_GTT,
3431 				      &adev->gfx.me.me_fw_obj,
3432 				      &adev->gfx.me.me_fw_gpu_addr,
3433 				      (void **)&adev->gfx.me.me_fw_ptr);
3434 	if (r) {
3435 		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
3436 		gfx_v11_0_me_fini(adev);
3437 		return r;
3438 	}
3439 
3440 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3441 				      64 * 1024,
3442 				      AMDGPU_GEM_DOMAIN_VRAM |
3443 				      AMDGPU_GEM_DOMAIN_GTT,
3444 				      &adev->gfx.me.me_fw_data_obj,
3445 				      &adev->gfx.me.me_fw_data_gpu_addr,
3446 				      (void **)&adev->gfx.me.me_fw_data_ptr);
3447 	if (r) {
3448 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
3449 		gfx_v11_0_me_fini(adev);
3450 		return r;
3451 	}
3452 
3453 	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
3454 	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
3455 
3456 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3457 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3458 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3459 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3460 
3461 	if (amdgpu_emu_mode == 1)
3462 		amdgpu_device_flush_hdp(adev, NULL);
3463 
3464 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3465 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3466 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3467 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3468 
3469 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3470 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3471 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3472 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3473 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3474 
3475 	/*
3476 	 * Programming any of the CP_ME_IC_BASE registers
3477 	 * forces invalidation of the ME L1 I$. Wait for the
3478 	 * invalidation to complete.
3479 	 */
3480 	for (i = 0; i < usec_timeout; i++) {
3481 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3482 		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3483 				  INVALIDATE_CACHE_COMPLETE) == 1)
3484 			break;
3485 		udelay(1);
3486 	}
3487 
3488 	if (i >= usec_timeout) {
3489 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3490 		return -EINVAL;
3491 	}
3492 
3493 	/* Prime the instruction caches */
3494 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3495 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3496 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3497 
3498 	/* Wait for the instruction cache to be primed */
3499 	for (i = 0; i < usec_timeout; i++) {
3500 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3501 		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3502 				  ICACHE_PRIMED) == 1)
3503 			break;
3504 		udelay(1);
3505 	}
3506 
3507 	if (i >= usec_timeout) {
3508 		dev_err(adev->dev, "failed to prime instruction cache\n");
3509 		return -EINVAL;
3510 	}
3511 
3512 	mutex_lock(&adev->srbm_mutex);
3513 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3514 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3515 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3516 			(me_hdr->ucode_start_addr_hi << 30) |
3517 			(me_hdr->ucode_start_addr_lo >> 2));
3518 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3519 			me_hdr->ucode_start_addr_hi >> 2);
3520 
3521 		/*
3522 		 * Program CP_ME_CNTL to reset the given pipe so that
3523 		 * CP_ME_PRGRM_CNTR_START takes effect.
3524 		 */
3525 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3526 		if (pipe_id == 0)
3527 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3528 					ME_PIPE0_RESET, 1);
3529 		else
3530 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3531 					ME_PIPE1_RESET, 1);
3532 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3533 
3534 		/* Clear the me pipe reset bit. */
3535 		if (pipe_id == 0)
3536 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3537 					ME_PIPE0_RESET, 0);
3538 		else
3539 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3540 					ME_PIPE1_RESET, 0);
3541 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3542 
3543 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3544 			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3545 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3546 			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3547 	}
3548 	soc21_grbm_select(adev, 0, 0, 0, 0);
3549 	mutex_unlock(&adev->srbm_mutex);
3550 
3551 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3552 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3553 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3554 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3555 
3556 	/* Invalidate the data caches */
3557 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3558 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3559 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3560 
3561 	for (i = 0; i < usec_timeout; i++) {
3562 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3563 		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3564 				  INVALIDATE_DCACHE_COMPLETE) == 1)
3565 			break;
3566 		udelay(1);
3567 	}
3568 
3569 	if (i >= usec_timeout) {
3570 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3571 		return -EINVAL;
3572 	}
3573 
3574 	return 0;
3575 }
3576 
3577 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3578 {
3579 	int r;
3580 
3581 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3582 		return -EINVAL;
3583 
3584 	gfx_v11_0_cp_gfx_enable(adev, false);
3585 
3586 	if (adev->gfx.rs64_enable)
3587 		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3588 	else
3589 		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3590 	if (r) {
3591 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3592 		return r;
3593 	}
3594 
3595 	if (adev->gfx.rs64_enable)
3596 		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3597 	else
3598 		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3599 	if (r) {
3600 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3601 		return r;
3602 	}
3603 
3604 	return 0;
3605 }
3606 
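/*
 * Initialize CP state: emit the clear-state buffer (preamble,
 * SET_CONTEXT_REG packets for every SECT_CONTEXT extent and the
 * PA_SC_TILE_STEERING_OVERRIDE value) on gfx ring 0, then a bare
 * CLEAR_STATE on gfx ring 1 to copy state 0 to the next state.
 */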
3607 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3608 {
3609 	struct amdgpu_ring *ring;
3610 	const struct cs_section_def *sect = NULL;
3611 	const struct cs_extent_def *ext = NULL;
3612 	int r, i;
3613 	int ctx_reg_offset;
3614 
3615 	/* init the CP */
3616 	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3617 		     adev->gfx.config.max_hw_contexts - 1);
3618 	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3619 
3620 	if (!amdgpu_async_gfx_ring)
3621 		gfx_v11_0_cp_gfx_enable(adev, true);
3622 
3623 	ring = &adev->gfx.gfx_ring[0];
3624 	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3625 	if (r) {
3626 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3627 		return r;
3628 	}
3629 
3630 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3631 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3632 
3633 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3634 	amdgpu_ring_write(ring, 0x80000000);
3635 	amdgpu_ring_write(ring, 0x80000000);
3636 
3637 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3638 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3639 			if (sect->id == SECT_CONTEXT) {
3640 				amdgpu_ring_write(ring,
3641 						  PACKET3(PACKET3_SET_CONTEXT_REG,
3642 							  ext->reg_count));
3643 				amdgpu_ring_write(ring, ext->reg_index -
3644 						  PACKET3_SET_CONTEXT_REG_START);
3645 				for (i = 0; i < ext->reg_count; i++)
3646 					amdgpu_ring_write(ring, ext->extent[i]);
3647 			}
3648 		}
3649 	}
3650 
3651 	ctx_reg_offset =
3652 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3653 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3654 	amdgpu_ring_write(ring, ctx_reg_offset);
3655 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3656 
3657 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3658 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3659 
3660 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3661 	amdgpu_ring_write(ring, 0);
3662 
3663 	amdgpu_ring_commit(ring);
3664 
3665 	/* submit cs packet to copy state 0 to next available state */
3666 	if (adev->gfx.num_gfx_rings > 1) {
3667 		/* maximum supported gfx ring is 2 */
3668 		ring = &adev->gfx.gfx_ring[1];
3669 		r = amdgpu_ring_alloc(ring, 2);
3670 		if (r) {
3671 			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3672 			return r;
3673 		}
3674 
3675 		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3676 		amdgpu_ring_write(ring, 0);
3677 
3678 		amdgpu_ring_commit(ring);
3679 	}
3680 	return 0;
3681 }
3682 
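/*
 * Route subsequent pipe-indexed CP register accesses to the given gfx
 * pipe via GRBM_GFX_CNTL.PIPEID.
 */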
3683 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3684 					 CP_PIPE_ID pipe)
3685 {
3686 	u32 tmp;
3687 
3688 	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3689 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3690 
3691 	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3692 }
3693 
3694 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3695 					  struct amdgpu_ring *ring)
3696 {
3697 	u32 tmp;
3698 
3699 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3700 	if (ring->use_doorbell) {
3701 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3702 				    DOORBELL_OFFSET, ring->doorbell_index);
3703 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3704 				    DOORBELL_EN, 1);
3705 	} else {
3706 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3707 				    DOORBELL_EN, 0);
3708 	}
3709 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3710 
3711 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3712 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
3713 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3714 
3715 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3716 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3717 }
3718 
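/*
 * Program the ring-buffer registers (size, base, rptr/wptr addresses,
 * doorbell) for gfx ring 0 on pipe 0 and, when present, gfx ring 1 on
 * pipe 1, then kick the CP via gfx_v11_0_cp_gfx_start().
 */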
3719 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3720 {
3721 	struct amdgpu_ring *ring;
3722 	u32 tmp;
3723 	u32 rb_bufsz;
3724 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3725 
3726 	/* Set the write pointer delay */
3727 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3728 
3729 	/* set the RB to use vmid 0 */
3730 	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3731 
3732 	/* Init gfx ring 0 for pipe 0 */
3733 	mutex_lock(&adev->srbm_mutex);
3734 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3735 
3736 	/* Set ring buffer size */
3737 	ring = &adev->gfx.gfx_ring[0];
3738 	rb_bufsz = order_base_2(ring->ring_size / 8);
3739 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3740 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3741 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3742 
3743 	/* Initialize the ring buffer's write pointers */
3744 	ring->wptr = 0;
3745 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3746 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3747 
3748 	/* set the wb address whether it's enabled or not */
3749 	rptr_addr = ring->rptr_gpu_addr;
3750 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3751 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3752 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3753 
3754 	wptr_gpu_addr = ring->wptr_gpu_addr;
3755 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3756 		     lower_32_bits(wptr_gpu_addr));
3757 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3758 		     upper_32_bits(wptr_gpu_addr));
3759 
3760 	mdelay(1);
3761 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3762 
3763 	rb_addr = ring->gpu_addr >> 8;
3764 	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3765 	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3766 
3767 	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3768 
3769 	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3770 	mutex_unlock(&adev->srbm_mutex);
3771 
3772 	/* Init gfx ring 1 for pipe 1 */
3773 	if (adev->gfx.num_gfx_rings > 1) {
3774 		mutex_lock(&adev->srbm_mutex);
3775 		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3776 		/* maximum supported gfx ring is 2 */
3777 		ring = &adev->gfx.gfx_ring[1];
3778 		rb_bufsz = order_base_2(ring->ring_size / 8);
3779 		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3780 		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3781 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3782 		/* Initialize the ring buffer's write pointers */
3783 		ring->wptr = 0;
3784 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3785 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3786 		/* Set the wb address whether it's enabled or not */
3787 		rptr_addr = ring->rptr_gpu_addr;
3788 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3789 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3790 			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3791 		wptr_gpu_addr = ring->wptr_gpu_addr;
3792 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3793 			     lower_32_bits(wptr_gpu_addr));
3794 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3795 			     upper_32_bits(wptr_gpu_addr));
3796 
3797 		mdelay(1);
3798 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3799 
3800 		rb_addr = ring->gpu_addr >> 8;
3801 		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3802 		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3803 		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3804 
3805 		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3806 		mutex_unlock(&adev->srbm_mutex);
3807 	}
3808 	/* Switch to pipe 0 */
3809 	mutex_lock(&adev->srbm_mutex);
3810 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3811 	mutex_unlock(&adev->srbm_mutex);
3812 
3813 	/* start the ring */
3814 	gfx_v11_0_cp_gfx_start(adev);
3815 
3816 	return 0;
3817 }
3818 
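/*
 * Halt or release the compute micro engines.  On RS64 parts this is
 * done per pipe through CP_MEC_RS64_CNTL; otherwise the legacy
 * MEC_ME1/ME2 halt bits are used, with MEC ME2 left halted when the
 * MES provides the KIQ.
 */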
3819 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3820 {
3821 	u32 data;
3822 
3823 	if (adev->gfx.rs64_enable) {
3824 		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3825 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3826 							 enable ? 0 : 1);
3827 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3828 							 enable ? 0 : 1);
3829 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3830 							 enable ? 0 : 1);
3831 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3832 							 enable ? 0 : 1);
3833 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3834 							 enable ? 0 : 1);
3835 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3836 							 enable ? 1 : 0);
3837 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3838 							 enable ? 1 : 0);
3839 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3840 							 enable ? 1 : 0);
3841 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3842 							 enable ? 1 : 0);
3843 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3844 							 enable ? 0 : 1);
3845 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3846 	} else {
3847 		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3848 
3849 		if (enable) {
3850 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3851 			if (!adev->enable_mes_kiq)
3852 				data = REG_SET_FIELD(data, CP_MEC_CNTL,
3853 						     MEC_ME2_HALT, 0);
3854 		} else {
3855 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3856 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3857 		}
3858 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3859 	}
3860 
3861 	udelay(50);
3862 }
3863 
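/*
 * Legacy MEC microcode load: copy the MEC1 ucode into a GTT buffer
 * object and write its jump table through CP_MEC_ME1_UCODE_ADDR/DATA.
 */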
3864 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3865 {
3866 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3867 	const __le32 *fw_data;
3868 	unsigned i, fw_size;
3869 	u32 *fw = NULL;
3870 	int r;
3871 
3872 	if (!adev->gfx.mec_fw)
3873 		return -EINVAL;
3874 
3875 	gfx_v11_0_cp_compute_enable(adev, false);
3876 
3877 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3878 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3879 
3880 	fw_data = (const __le32 *)
3881 		(adev->gfx.mec_fw->data +
3882 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3883 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3884 
3885 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3886 					  PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3887 					  &adev->gfx.mec.mec_fw_obj,
3888 					  &adev->gfx.mec.mec_fw_gpu_addr,
3889 					  (void **)&fw);
3890 	if (r) {
3891 		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3892 		gfx_v11_0_mec_fini(adev);
3893 		return r;
3894 	}
3895 
3896 	memcpy(fw, fw_data, fw_size);
3897 
3898 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3899 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3900 
3901 	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3902 
3903 	/* MEC1 */
3904 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3905 
3906 	for (i = 0; i < mec_hdr->jt_size; i++)
3907 		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3908 			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3909 
3910 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3911 
3912 	return 0;
3913 }
3914 
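/*
 * RS64 MEC microcode load: one shared instruction image and one data
 * image, programmed per pipe (MDBASE, IC_BASE and the ucode start
 * address), followed by invalidation of the MEC data and CPC
 * instruction caches.
 */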
3915 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3916 {
3917 	const struct gfx_firmware_header_v2_0 *mec_hdr;
3918 	const __le32 *fw_ucode, *fw_data;
3919 	u32 tmp, fw_ucode_size, fw_data_size;
3920 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3921 	u32 *fw_ucode_ptr, *fw_data_ptr;
3922 	int r;
3923 
3924 	if (!adev->gfx.mec_fw)
3925 		return -EINVAL;
3926 
3927 	gfx_v11_0_cp_compute_enable(adev, false);
3928 
3929 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3930 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3931 
3932 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3933 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
3934 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3935 
3936 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3937 				le32_to_cpu(mec_hdr->data_offset_bytes));
3938 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3939 
3940 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3941 				      64 * 1024,
3942 				      AMDGPU_GEM_DOMAIN_VRAM |
3943 				      AMDGPU_GEM_DOMAIN_GTT,
3944 				      &adev->gfx.mec.mec_fw_obj,
3945 				      &adev->gfx.mec.mec_fw_gpu_addr,
3946 				      (void **)&fw_ucode_ptr);
3947 	if (r) {
3948 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3949 		gfx_v11_0_mec_fini(adev);
3950 		return r;
3951 	}
3952 
3953 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3954 				      64 * 1024,
3955 				      AMDGPU_GEM_DOMAIN_VRAM |
3956 				      AMDGPU_GEM_DOMAIN_GTT,
3957 				      &adev->gfx.mec.mec_fw_data_obj,
3958 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
3959 				      (void **)&fw_data_ptr);
3960 	if (r) {
3961 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3962 		gfx_v11_0_mec_fini(adev);
3963 		return r;
3964 	}
3965 
3966 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3967 	memcpy(fw_data_ptr, fw_data, fw_data_size);
3968 
3969 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3970 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3971 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3972 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3973 
3974 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3975 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3976 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3977 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3978 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3979 
3980 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3981 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3982 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3983 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3984 
3985 	mutex_lock(&adev->srbm_mutex);
3986 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3987 		soc21_grbm_select(adev, 1, i, 0, 0);
3988 
3989 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3990 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3991 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3992 
3993 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3994 					(mec_hdr->ucode_start_addr_lo >> 2) |
3995 					(mec_hdr->ucode_start_addr_hi << 30));
3996 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3997 					mec_hdr->ucode_start_addr_hi >> 2);
3998 
3999 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
4000 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
4001 			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
4002 	}
4003 	mutex_unlock(&adev->srbm_mutex);
4004 	soc21_grbm_select(adev, 0, 0, 0, 0);
4005 
4006 	/* Trigger an invalidation of the MEC L1 data cache */
4007 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
4008 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
4009 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
4010 
4011 	/* Wait for invalidation complete */
4012 	for (i = 0; i < usec_timeout; i++) {
4013 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
4014 		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
4015 				  INVALIDATE_DCACHE_COMPLETE) == 1)
4016 			break;
4017 		udelay(1);
4018 	}
4019 
4020 	if (i >= usec_timeout) {
4021 		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
4022 		return -EINVAL;
4023 	}
4024 
4025 	/* Trigger an invalidation of the L1 instruction caches */
4026 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
4027 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
4028 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
4029 
4030 	/* Wait for invalidation complete */
4031 	for (i = 0; i < usec_timeout; i++) {
4032 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
4033 		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
4034 				  INVALIDATE_CACHE_COMPLETE) == 1)
4035 			break;
4036 		udelay(1);
4037 	}
4038 
4039 	if (i >= usec_timeout) {
4040 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
4041 		return -EINVAL;
4042 	}
4043 
4044 	return 0;
4045 }
4046 
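/*
 * Tell the RLC which hardware queue is the KIQ: the low byte of
 * RLC_CP_SCHEDULERS encodes (me << 5) | (pipe << 3) | queue, with bit
 * 0x80 presumably marking the entry valid.
 */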
4047 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
4048 {
4049 	uint32_t tmp;
4050 	struct amdgpu_device *adev = ring->adev;
4051 
4052 	/* tell RLC which is KIQ queue */
4053 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
4054 	tmp &= 0xffffff00;
4055 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4056 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
4057 }
4058 
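/*
 * Doorbell indices are in 64-bit doorbell units, so "* 2" converts
 * them to dword units and "<< 2" to the byte offset the
 * DOORBELL_RANGE registers take.
 */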
4059 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
4060 {
4061 	/* set graphics engine doorbell range */
4062 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
4063 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
4064 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
4065 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
4066 
4067 	/* set compute engine doorbell range */
4068 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4069 		     (adev->doorbell_index.kiq * 2) << 2);
4070 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4071 		     (adev->doorbell_index.userqueue_end * 2) << 2);
4072 }
4073 
4074 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
4075 					   struct v11_gfx_mqd *mqd,
4076 					   struct amdgpu_mqd_prop *prop)
4077 {
4078 	bool priority = 0;
4079 	u32 tmp;
4080 
4081 	/* set up default queue priority level
4082 	 * 0x0 = low priority, 0x1 = high priority
4083 	 */
4084 	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
4085 		priority = 1;
4086 
4087 	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
4088 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
4089 	mqd->cp_gfx_hqd_queue_priority = tmp;
4090 }
4091 
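/*
 * Fill the v11 gfx MQD (memory queue descriptor).  When the queue is
 * mapped, the CP/MES loads the cp_gfx_hqd_* fields from this
 * structure into the corresponding HQD registers.
 */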
4092 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
4093 				  struct amdgpu_mqd_prop *prop)
4094 {
4095 	struct v11_gfx_mqd *mqd = m;
4096 	uint64_t hqd_gpu_addr, wb_gpu_addr;
4097 	uint32_t tmp;
4098 	uint32_t rb_bufsz;
4099 
4100 	/* set up gfx hqd wptr */
4101 	mqd->cp_gfx_hqd_wptr = 0;
4102 	mqd->cp_gfx_hqd_wptr_hi = 0;
4103 
4104 	/* set the pointer to the MQD */
4105 	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
4106 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4107 
4108 	/* set up mqd control */
4109 	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
4110 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
4111 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
4112 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
4113 	mqd->cp_gfx_mqd_control = tmp;
4114 
4115 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
4116 	tmp = regCP_GFX_HQD_VMID_DEFAULT;
4117 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
4118 	mqd->cp_gfx_hqd_vmid = tmp;
4119 
4120 	/* set up gfx queue priority */
4121 	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
4122 
4123 	/* set up time quantum */
4124 	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
4125 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
4126 	mqd->cp_gfx_hqd_quantum = tmp;
4127 
4128 	/* set up gfx hqd base. this is similar as CP_RB_BASE */
4129 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4130 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
4131 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
4132 
4133 	/* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
4134 	wb_gpu_addr = prop->rptr_gpu_addr;
4135 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
4136 	mqd->cp_gfx_hqd_rptr_addr_hi =
4137 		upper_32_bits(wb_gpu_addr) & 0xffff;
4138 
4139 	/* set up rb_wptr_poll addr */
4140 	wb_gpu_addr = prop->wptr_gpu_addr;
4141 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4142 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4143 
4144 	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
4145 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
4146 	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
4147 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
4148 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
4149 #ifdef __BIG_ENDIAN
4150 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
4151 #endif
4152 	if (prop->tmz_queue)
4153 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
4154 	if (!prop->kernel_queue)
4155 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
4156 	mqd->cp_gfx_hqd_cntl = tmp;
4157 
4158 	/* set up cp_doorbell_control */
4159 	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
4160 	if (prop->use_doorbell) {
4161 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4162 				    DOORBELL_OFFSET, prop->doorbell_index);
4163 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4164 				    DOORBELL_EN, 1);
4165 	} else {
4166 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4167 				    DOORBELL_EN, 0);
	}
4168 	mqd->cp_rb_doorbell_control = tmp;
4169 
4170 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4171 	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
4172 
4173 	/* activate the queue */
4174 	mqd->cp_gfx_hqd_active = 1;
4175 
4176 	/* set gfx UQ items */
4177 	mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
4178 	mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
4179 	mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
4180 	mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
4181 	mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
4182 	mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
4183 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
4184 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
4185 
4186 	return 0;
4187 }
4188 
4189 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
4190 {
4191 	struct amdgpu_device *adev = ring->adev;
4192 	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
4193 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
4194 
4195 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4196 		memset((void *)mqd, 0, sizeof(*mqd));
4197 		mutex_lock(&adev->srbm_mutex);
4198 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4199 		amdgpu_ring_init_mqd(ring);
4200 		soc21_grbm_select(adev, 0, 0, 0, 0);
4201 		mutex_unlock(&adev->srbm_mutex);
4202 		if (adev->gfx.me.mqd_backup[mqd_idx])
4203 			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4204 	} else {
4205 		/* restore mqd with the backup copy */
4206 		if (adev->gfx.me.mqd_backup[mqd_idx])
4207 			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
4208 		/* reset the ring */
4209 		ring->wptr = 0;
4210 		*ring->wptr_cpu_addr = 0;
4211 		amdgpu_ring_clear_ring(ring);
4212 	}
4213 
4214 	return 0;
4215 }
4216 
4217 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
4218 {
4219 	int r, i;
4220 
4221 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4222 		r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
4223 		if (r)
4224 			return r;
4225 	}
4226 
4227 	r = amdgpu_gfx_enable_kgq(adev, 0);
4228 	if (r)
4229 		return r;
4230 
4231 	return gfx_v11_0_cp_gfx_start(adev);
4232 }
4233 
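/*
 * Fill the v11 compute MQD; the compute counterpart of
 * gfx_v11_0_gfx_mqd_init(), covering the CP_HQD_* registers.
 */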
4234 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
4235 				      struct amdgpu_mqd_prop *prop)
4236 {
4237 	struct v11_compute_mqd *mqd = m;
4238 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4239 	uint32_t tmp;
4240 
4241 	mqd->header = 0xC0310800;
4242 	mqd->compute_pipelinestat_enable = 0x00000001;
4243 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4244 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4245 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4246 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4247 	mqd->compute_misc_reserved = 0x00000007;
4248 
4249 	eop_base_addr = prop->eop_gpu_addr >> 8;
4250 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4251 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4252 
4253 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4254 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
4255 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4256 			(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
4257 
4258 	mqd->cp_hqd_eop_control = tmp;
4259 
4260 	/* enable doorbell? */
4261 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
4262 
4263 	if (prop->use_doorbell) {
4264 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4265 				    DOORBELL_OFFSET, prop->doorbell_index);
4266 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4267 				    DOORBELL_EN, 1);
4268 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4269 				    DOORBELL_SOURCE, 0);
4270 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4271 				    DOORBELL_HIT, 0);
4272 	} else {
4273 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4274 				    DOORBELL_EN, 0);
4275 	}
4276 
4277 	mqd->cp_hqd_pq_doorbell_control = tmp;
4278 
4279 	/* disable the queue if it's active */
4280 	mqd->cp_hqd_dequeue_request = 0;
4281 	mqd->cp_hqd_pq_rptr = 0;
4282 	mqd->cp_hqd_pq_wptr_lo = 0;
4283 	mqd->cp_hqd_pq_wptr_hi = 0;
4284 
4285 	/* set the pointer to the MQD */
4286 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
4287 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4288 
4289 	/* set MQD vmid to 0 */
4290 	tmp = regCP_MQD_CONTROL_DEFAULT;
4291 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4292 	mqd->cp_mqd_control = tmp;
4293 
4294 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4295 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4296 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4297 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4298 
4299 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4300 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
4301 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4302 			    (order_base_2(prop->queue_size / 4) - 1));
4303 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4304 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4305 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
4306 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
4307 			    prop->allow_tunneling);
4308 	if (prop->kernel_queue) {
4309 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4310 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4311 	}
4312 	if (prop->tmz_queue)
4313 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
4314 	mqd->cp_hqd_pq_control = tmp;
4315 
4316 	/* set the wb address whether it's enabled or not */
4317 	wb_gpu_addr = prop->rptr_gpu_addr;
4318 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4319 	mqd->cp_hqd_pq_rptr_report_addr_hi =
4320 		upper_32_bits(wb_gpu_addr) & 0xffff;
4321 
4322 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4323 	wb_gpu_addr = prop->wptr_gpu_addr;
4324 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4325 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4326 
4327 	tmp = 0;
4328 	/* enable the doorbell if requested */
4329 	if (prop->use_doorbell) {
4330 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
4331 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4332 				DOORBELL_OFFSET, prop->doorbell_index);
4333 
4334 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4335 				    DOORBELL_EN, 1);
4336 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4337 				    DOORBELL_SOURCE, 0);
4338 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4339 				    DOORBELL_HIT, 0);
4340 	}
4341 
4342 	mqd->cp_hqd_pq_doorbell_control = tmp;
4343 
4344 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4345 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
4346 
4347 	/* set the vmid for the queue */
4348 	mqd->cp_hqd_vmid = 0;
4349 
4350 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
4351 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
4352 	mqd->cp_hqd_persistent_state = tmp;
4353 
4354 	/* set MIN_IB_AVAIL_SIZE */
4355 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
4356 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4357 	mqd->cp_hqd_ib_control = tmp;
4358 
4359 	/* set static priority for a compute queue/ring */
4360 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
4361 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
4362 
4363 	mqd->cp_hqd_active = prop->hqd_active;
4364 
4365 	/* set UQ fence address */
4366 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
4367 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
4368 
4369 	return 0;
4370 }
4371 
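/*
 * Program the KIQ's HQD registers directly via MMIO from its MQD.
 * Regular compute queues are mapped by the KIQ (or MES) instead; the
 * KIQ itself has to be brought up by the driver.
 */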
4372 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
4373 {
4374 	struct amdgpu_device *adev = ring->adev;
4375 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4376 	int j;
4377 
4378 	/* deactivate the queue */
4379 	if (amdgpu_sriov_vf(adev))
4380 		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
4381 
4382 	/* disable wptr polling */
4383 	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4384 
4385 	/* write the EOP addr */
4386 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
4387 	       mqd->cp_hqd_eop_base_addr_lo);
4388 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
4389 	       mqd->cp_hqd_eop_base_addr_hi);
4390 
4391 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4392 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
4393 	       mqd->cp_hqd_eop_control);
4394 
4395 	/* enable doorbell? */
4396 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4397 	       mqd->cp_hqd_pq_doorbell_control);
4398 
4399 	/* disable the queue if it's active */
4400 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
4401 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
4402 		for (j = 0; j < adev->usec_timeout; j++) {
4403 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
4404 				break;
4405 			udelay(1);
4406 		}
4407 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
4408 		       mqd->cp_hqd_dequeue_request);
4409 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
4410 		       mqd->cp_hqd_pq_rptr);
4411 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4412 		       mqd->cp_hqd_pq_wptr_lo);
4413 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4414 		       mqd->cp_hqd_pq_wptr_hi);
4415 	}
4416 
4417 	/* set the pointer to the MQD */
4418 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
4419 	       mqd->cp_mqd_base_addr_lo);
4420 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
4421 	       mqd->cp_mqd_base_addr_hi);
4422 
4423 	/* set MQD vmid to 0 */
4424 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
4425 	       mqd->cp_mqd_control);
4426 
4427 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4428 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
4429 	       mqd->cp_hqd_pq_base_lo);
4430 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
4431 	       mqd->cp_hqd_pq_base_hi);
4432 
4433 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4434 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
4435 	       mqd->cp_hqd_pq_control);
4436 
4437 	/* set the wb address whether it's enabled or not */
4438 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
4439 		mqd->cp_hqd_pq_rptr_report_addr_lo);
4440 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4441 		mqd->cp_hqd_pq_rptr_report_addr_hi);
4442 
4443 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4444 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
4445 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
4446 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
4447 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
4448 
4449 	/* enable the doorbell if requested */
4450 	if (ring->use_doorbell) {
4451 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4452 			(adev->doorbell_index.kiq * 2) << 2);
4453 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4454 			(adev->doorbell_index.userqueue_end * 2) << 2);
4455 	}
4456 
4457 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4458 	       mqd->cp_hqd_pq_doorbell_control);
4459 
4460 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4461 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4462 	       mqd->cp_hqd_pq_wptr_lo);
4463 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4464 	       mqd->cp_hqd_pq_wptr_hi);
4465 
4466 	/* set the vmid for the queue */
4467 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
4468 
4469 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
4470 	       mqd->cp_hqd_persistent_state);
4471 
4472 	/* activate the queue */
4473 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4474 	       mqd->cp_hqd_active);
4475 
4476 	if (ring->use_doorbell)
4477 		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4478 
4479 	return 0;
4480 }
4481 
4482 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4483 {
4484 	struct amdgpu_device *adev = ring->adev;
4485 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4486 
4487 	gfx_v11_0_kiq_setting(ring);
4488 
4489 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4490 		/* reset MQD to a clean status */
4491 		if (adev->gfx.kiq[0].mqd_backup)
4492 			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4493 
4494 		/* reset ring buffer */
4495 		ring->wptr = 0;
4496 		amdgpu_ring_clear_ring(ring);
4497 
4498 		mutex_lock(&adev->srbm_mutex);
4499 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4500 		gfx_v11_0_kiq_init_register(ring);
4501 		soc21_grbm_select(adev, 0, 0, 0, 0);
4502 		mutex_unlock(&adev->srbm_mutex);
4503 	} else {
4504 		memset((void *)mqd, 0, sizeof(*mqd));
4505 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4506 			amdgpu_ring_clear_ring(ring);
4507 		mutex_lock(&adev->srbm_mutex);
4508 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4509 		amdgpu_ring_init_mqd(ring);
4510 		gfx_v11_0_kiq_init_register(ring);
4511 		soc21_grbm_select(adev, 0, 0, 0, 0);
4512 		mutex_unlock(&adev->srbm_mutex);
4513 
4514 		if (adev->gfx.kiq[0].mqd_backup)
4515 			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4516 	}
4517 
4518 	return 0;
4519 }
4520 
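/*
 * Initialize the compute queue MQD on first bring-up, or restore it
 * from the CPU-side backup and reset the ring on GPU reset/resume.
 */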
4521 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
4522 {
4523 	struct amdgpu_device *adev = ring->adev;
4524 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4525 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4526 
4527 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4528 		memset((void *)mqd, 0, sizeof(*mqd));
4529 		mutex_lock(&adev->srbm_mutex);
4530 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4531 		amdgpu_ring_init_mqd(ring);
4532 		soc21_grbm_select(adev, 0, 0, 0, 0);
4533 		mutex_unlock(&adev->srbm_mutex);
4534 
4535 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4536 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4537 	} else {
4538 		/* restore MQD to a clean status */
4539 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4540 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4541 		/* reset ring buffer */
4542 		ring->wptr = 0;
4543 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4544 		amdgpu_ring_clear_ring(ring);
4545 	}
4546 
4547 	return 0;
4548 }
4549 
4550 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4551 {
4552 	gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
4553 	return 0;
4554 }
4555 
4556 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4557 {
4558 	int i, r;
4559 
4560 	if (!amdgpu_async_gfx_ring)
4561 		gfx_v11_0_cp_compute_enable(adev, true);
4562 
4563 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4564 		r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
4565 		if (r)
4566 			return r;
4567 	}
4568 
4569 	return amdgpu_gfx_enable_kcq(adev, 0);
4570 }
4571 
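/*
 * Bring up the whole CP block: load the CP microcode when the driver
 * loads firmware directly, set the doorbell ranges, resume the KIQ
 * (or MES KIQ), KCQs and gfx rings, and finally run ring tests on all
 * gfx and compute rings.
 */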
4572 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4573 {
4574 	int r, i;
4575 	struct amdgpu_ring *ring;
4576 
4577 	if (!(adev->flags & AMD_IS_APU))
4578 		gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4579 
4580 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4581 		/* legacy firmware loading */
4582 		r = gfx_v11_0_cp_gfx_load_microcode(adev);
4583 		if (r)
4584 			return r;
4585 
4586 		if (adev->gfx.rs64_enable)
4587 			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4588 		else
4589 			r = gfx_v11_0_cp_compute_load_microcode(adev);
4590 		if (r)
4591 			return r;
4592 	}
4593 
4594 	gfx_v11_0_cp_set_doorbell_range(adev);
4595 
4596 	if (amdgpu_async_gfx_ring) {
4597 		gfx_v11_0_cp_compute_enable(adev, true);
4598 		gfx_v11_0_cp_gfx_enable(adev, true);
4599 	}
4600 
4601 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4602 		r = amdgpu_mes_kiq_hw_init(adev, 0);
4603 	else
4604 		r = gfx_v11_0_kiq_resume(adev);
4605 	if (r)
4606 		return r;
4607 
4608 	r = gfx_v11_0_kcq_resume(adev);
4609 	if (r)
4610 		return r;
4611 
4612 	if (!amdgpu_async_gfx_ring) {
4613 		r = gfx_v11_0_cp_gfx_resume(adev);
4614 		if (r)
4615 			return r;
4616 	} else {
4617 		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4618 		if (r)
4619 			return r;
4620 	}
4621 
4622 	if (adev->gfx.disable_kq) {
4623 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4624 			ring = &adev->gfx.gfx_ring[i];
4625 			/* we don't want to set ring->ready */
4626 			r = amdgpu_ring_test_ring(ring);
4627 			if (r)
4628 				return r;
4629 		}
4630 		if (amdgpu_async_gfx_ring)
4631 			amdgpu_gfx_disable_kgq(adev, 0);
4632 	} else {
4633 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4634 			ring = &adev->gfx.gfx_ring[i];
4635 			r = amdgpu_ring_test_helper(ring);
4636 			if (r)
4637 				return r;
4638 		}
4639 	}
4640 
4641 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4642 		ring = &adev->gfx.compute_ring[i];
4643 		r = amdgpu_ring_test_helper(ring);
4644 		if (r)
4645 			return r;
4646 	}
4647 
4648 	return 0;
4649 }
4650 
4651 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4652 {
4653 	gfx_v11_0_cp_gfx_enable(adev, enable);
4654 	gfx_v11_0_cp_compute_enable(adev, enable);
4655 }
4656 
4657 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4658 {
4659 	int r;
4660 	bool value;
4661 
4662 	r = adev->gfxhub.funcs->gart_enable(adev);
4663 	if (r)
4664 		return r;
4665 
4666 	amdgpu_device_flush_hdp(adev, NULL);
4667 
4668 	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
4669 
4670 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4671 	/* TODO investigate why this and the hdp flush above are needed,
4672 	 * are we missing a flush somewhere else? */
4673 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4674 
4675 	return 0;
4676 }
4677 
4678 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4679 {
4680 	u32 tmp;
4681 
4682 	/* select RS64 */
4683 	if (adev->gfx.rs64_enable) {
4684 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4685 		tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4686 		WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4687 
4688 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4689 		tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4690 		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4691 	}
4692 
4693 	if (amdgpu_emu_mode == 1)
4694 		msleep(100);
4695 }
4696 
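/*
 * Decode GB_ADDR_CONFIG into the individual gb_addr_config_fields.
 * The register is expected to already be programmed (by firmware);
 * a value of zero is treated as an error.
 */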
4697 static int get_gb_addr_config(struct amdgpu_device *adev)
4698 {
4699 	u32 gb_addr_config;
4700 
4701 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4702 	if (gb_addr_config == 0)
4703 		return -EINVAL;
4704 
4705 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
4706 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4707 
4708 	adev->gfx.config.gb_addr_config = gb_addr_config;
4709 
4710 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4711 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4712 				      GB_ADDR_CONFIG, NUM_PIPES);
4713 
4714 	adev->gfx.config.max_tile_pipes =
4715 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4716 
4717 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4718 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4719 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4720 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4721 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4722 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
4723 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4724 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4725 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4726 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4727 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4728 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4729 
4730 	return 0;
4731 }
4732 
4733 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4734 {
4735 	uint32_t data;
4736 
4737 	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4738 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4739 	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4740 
4741 	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4742 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4743 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4744 }
4745 
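/*
 * hw_init sequence: bring up the IMU/RLC (front door via PSP or back
 * door via RLC autoload depending on the firmware load type), enable
 * the gfxhub/GART, program golden registers and constants, and finally
 * resume the RLC and CP.
 */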
4746 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
4747 {
4748 	int r;
4749 	struct amdgpu_device *adev = ip_block->adev;
4750 
4751 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4752 				       adev->gfx.cleaner_shader_ptr);
4753 
4754 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4755 		if (adev->gfx.imu.funcs) {
4756 			/* RLC autoload sequence 1: Program rlc ram */
4757 			if (adev->gfx.imu.funcs->program_rlc_ram)
4758 				adev->gfx.imu.funcs->program_rlc_ram(adev);
4759 			/* rlc autoload firmware */
4760 			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4761 			if (r)
4762 				return r;
4763 		}
4764 	} else {
4765 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4766 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4767 				if (adev->gfx.imu.funcs->load_microcode)
4768 					adev->gfx.imu.funcs->load_microcode(adev);
4769 				if (adev->gfx.imu.funcs->setup_imu)
4770 					adev->gfx.imu.funcs->setup_imu(adev);
4771 				if (adev->gfx.imu.funcs->start_imu)
4772 					adev->gfx.imu.funcs->start_imu(adev);
4773 			}
4774 
4775 			/* disable gpa mode in backdoor loading */
4776 			gfx_v11_0_disable_gpa_mode(adev);
4777 		}
4778 	}
4779 
4780 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4781 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4782 		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4783 		if (r) {
4784 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
4785 			return r;
4786 		}
4787 	}
4788 
4789 	adev->gfx.is_poweron = true;
4790 
4791 	if (get_gb_addr_config(adev))
4792 		DRM_WARN("Invalid gb_addr_config!\n");
4793 
4794 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4795 	    adev->gfx.rs64_enable)
4796 		gfx_v11_0_config_gfx_rs64(adev);
4797 
4798 	r = gfx_v11_0_gfxhub_enable(adev);
4799 	if (r)
4800 		return r;
4801 
4802 	if (!amdgpu_emu_mode)
4803 		gfx_v11_0_init_golden_registers(adev);
4804 
4805 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4806 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4807 		/*
4808 		 * For gfx 11, RLC firmware loading depends on the SMU firmware
4809 		 * being loaded first, so with direct loading the SMC ucode has
4810 		 * to be loaded here before the RLC.
4811 		 */
4812 		r = amdgpu_pm_load_smu_firmware(adev, NULL);
4813 		if (r)
4814 			return r;
4815 	}
4816 
4817 	gfx_v11_0_constants_init(adev);
4818 
4819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4820 		gfx_v11_0_select_cp_fw_arch(adev);
4821 
4822 	if (adev->nbio.funcs->gc_doorbell_init)
4823 		adev->nbio.funcs->gc_doorbell_init(adev);
4824 
4825 	r = gfx_v11_0_rlc_resume(adev);
4826 	if (r)
4827 		return r;
4828 
4829 	/*
4830 	 * golden register init and RLC resume may have overridden some
4831 	 * registers, so reconfigure them here
4832 	 */
4833 	gfx_v11_0_tcp_harvest(adev);
4834 
4835 	r = gfx_v11_0_cp_resume(adev);
4836 	if (r)
4837 		return r;
4838 
4839 	/* get IMU version from HW if it's not set */
4840 	if (!adev->gfx.imu_fw_version)
4841 		adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);
4842 
4843 	return r;
4844 }
4845 
4846 static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
4847 					      bool enable)
4848 {
4849 	unsigned int irq_type;
4850 	int m, p, r;
4851 
4852 	if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
4853 		for (m = 0; m < adev->gfx.me.num_me; m++) {
4854 			for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
4855 				irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
4856 				if (enable)
4857 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
4858 							   irq_type);
4859 				else
4860 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
4861 							   irq_type);
4862 				if (r)
4863 					return r;
4864 			}
4865 		}
4866 	}
4867 
4868 	if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
4869 		for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
4870 			for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
4871 				irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4872 					+ (m * adev->gfx.mec.num_pipe_per_mec)
4873 					+ p;
4874 				if (enable)
4875 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
4876 							   irq_type);
4877 				else
4878 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
4879 							   irq_type);
4880 				if (r)
4881 					return r;
4882 			}
4883 		}
4884 	}
4885 
4886 	return 0;
4887 }
4888 
4889 static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
4890 {
4891 	struct amdgpu_device *adev = ip_block->adev;
4892 
4893 	cancel_delayed_work_sync(&adev->gfx.idle_work);
4894 
4895 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4896 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4897 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4898 	gfx_v11_0_set_userq_eop_interrupts(adev, false);
4899 
4900 	if (!adev->no_hw_access) {
4901 		if (amdgpu_async_gfx_ring &&
4902 		    !adev->gfx.disable_kq) {
4903 			if (amdgpu_gfx_disable_kgq(adev, 0))
4904 				DRM_ERROR("KGQ disable failed\n");
4905 		}
4906 
4907 		if (amdgpu_gfx_disable_kcq(adev, 0))
4908 			DRM_ERROR("KCQ disable failed\n");
4909 
4910 		amdgpu_mes_kiq_hw_fini(adev, 0);
4911 	}
4912 
4913 	if (amdgpu_sriov_vf(adev))
4914 		/* Skip the steps that disable CPG and clear the KIQ position,
4915 		 * so that the CP can perform IDLE-SAVE during the switch. Those
4916 		 * steps are needed to avoid a DMAR error on gfx9, which is not
4917 		 * reproduced on gfx11.
4918 		 */
4919 		return 0;
4920 
4921 	gfx_v11_0_cp_enable(adev, false);
4922 	gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4923 
4924 	adev->gfxhub.funcs->gart_disable(adev);
4925 
4926 	adev->gfx.is_poweron = false;
4927 
4928 	return 0;
4929 }
4930 
4931 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
4932 {
4933 	return gfx_v11_0_hw_fini(ip_block);
4934 }
4935 
4936 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
4937 {
4938 	return gfx_v11_0_hw_init(ip_block);
4939 }
4940 
4941 static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
4942 {
4943 	struct amdgpu_device *adev = ip_block->adev;
4944 
4945 	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4946 			      GRBM_STATUS, GUI_ACTIVE);
4950 }
4951 
4952 static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4953 {
4954 	unsigned i;
4955 	u32 tmp;
4956 	struct amdgpu_device *adev = ip_block->adev;
4957 
4958 	for (i = 0; i < adev->usec_timeout; i++) {
4959 		/* read GRBM_STATUS */
4960 		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4961 			GRBM_STATUS__GUI_ACTIVE_MASK;
4962 
4963 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4964 			return 0;
4965 		udelay(1);
4966 	}
4967 	return -ETIMEDOUT;
4968 }
4969 
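/*
 * Acquire (req == true) or release (req == false) the CP_GFX_INDEX_MUTEX
 * hardware mutex that arbitrates access between the driver and the CP
 * firmware. Returns -EINVAL if the mutex state doesn't change within
 * the usual usec timeout.
 */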
4970 int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
4971 				      bool req)
4972 {
4973 	u32 i, tmp, val;
4974 
4975 	for (i = 0; i < adev->usec_timeout; i++) {
4976 		/* Request with MeId=2, PipeId=0 */
4977 		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
4978 		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
4979 		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
4980 
4981 		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
4982 		if (req) {
4983 			if (val == tmp)
4984 				break;
4985 		} else {
4986 			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
4987 					    REQUEST, 1);
4988 
4989 			/* unlocked or locked by firmware */
4990 			if (val != tmp)
4991 				break;
4992 		}
4993 		udelay(1);
4994 	}
4995 
4996 	if (i >= adev->usec_timeout)
4997 		return -EINVAL;
4998 
4999 	return 0;
5000 }
5001 
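/*
 * Soft reset sequence: mask the CP interrupts, request a dequeue on
 * every compute and gfx HQD, take the gfx index mutex while resetting
 * VMIDs 1-31 through CP_VMID_RESET, pulse the GRBM soft reset bits for
 * the CP blocks, and finally re-enable interrupts and resume the CP.
 */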
5002 static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
5003 {
5004 	u32 grbm_soft_reset = 0;
5005 	u32 tmp;
5006 	int r, i, j, k;
5007 	struct amdgpu_device *adev = ip_block->adev;
5008 
5009 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5010 
5011 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5012 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
5013 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
5014 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
5015 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
5016 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
5017 
5018 	mutex_lock(&adev->srbm_mutex);
5019 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
5020 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
5021 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
5022 				soc21_grbm_select(adev, i, k, j, 0);
5023 
5024 				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
5025 				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
5026 			}
5027 		}
5028 	}
5029 	for (i = 0; i < adev->gfx.me.num_me; ++i) {
5030 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
5031 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
5032 				soc21_grbm_select(adev, i, k, j, 0);
5033 
5034 				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
5035 			}
5036 		}
5037 	}
5038 	soc21_grbm_select(adev, 0, 0, 0, 0);
5039 	mutex_unlock(&adev->srbm_mutex);
5040 
5041 	/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
5042 	mutex_lock(&adev->gfx.reset_sem_mutex);
5043 	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
5044 	if (r) {
5045 		mutex_unlock(&adev->gfx.reset_sem_mutex);
5046 		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
5047 		return r;
5048 	}
5049 
5050 	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
5051 
5052 	/* Read the CP_VMID_RESET register three times to give
5053 	 * GFX_HQD_ACTIVE sufficient time to reach 0 */
5054 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5055 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5056 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5057 
5058 	/* release the gfx mutex */
5059 	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
5060 	mutex_unlock(&adev->gfx.reset_sem_mutex);
5061 	if (r) {
5062 		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
5063 		return r;
5064 	}
5065 
5066 	for (i = 0; i < adev->usec_timeout; i++) {
5067 		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
5068 		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
5069 			break;
5070 		udelay(1);
5071 	}
5072 	if (i >= adev->usec_timeout) {
5073 		printk("Failed to wait all pipes clean\n");
5074 		return -EINVAL;
5075 	}
5076 
5077 	/**********  trigger soft reset  ***********/
5078 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
5079 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5080 					SOFT_RESET_CP, 1);
5081 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5082 					SOFT_RESET_GFX, 1);
5083 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5084 					SOFT_RESET_CPF, 1);
5085 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5086 					SOFT_RESET_CPC, 1);
5087 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5088 					SOFT_RESET_CPG, 1);
5089 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
5090 	/**********  exit soft reset  ***********/
5091 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
5092 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5093 					SOFT_RESET_CP, 0);
5094 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5095 					SOFT_RESET_GFX, 0);
5096 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5097 					SOFT_RESET_CPF, 0);
5098 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5099 					SOFT_RESET_CPC, 0);
5100 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5101 					SOFT_RESET_CPG, 0);
5102 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
5103 
5104 	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
5105 	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
5106 	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
5107 
5108 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
5109 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
5110 
5111 	for (i = 0; i < adev->usec_timeout; i++) {
5112 		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
5113 			break;
5114 		udelay(1);
5115 	}
5116 	if (i >= adev->usec_timeout) {
5117 		printk("Failed to wait CP_VMID_RESET to 0\n");
5118 		return -EINVAL;
5119 	}
5120 
5121 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5122 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5123 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5124 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5125 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5126 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
5127 
5128 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5129 
5130 	return gfx_v11_0_cp_resume(adev);
5131 }
5132 
5133 static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
5134 {
5135 	int i, r;
5136 	struct amdgpu_device *adev = ip_block->adev;
5137 	struct amdgpu_ring *ring;
5138 	long tmo = msecs_to_jiffies(1000);
5139 
5140 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5141 		ring = &adev->gfx.gfx_ring[i];
5142 		r = amdgpu_ring_test_ib(ring, tmo);
5143 		if (r)
5144 			return true;
5145 	}
5146 
5147 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5148 		ring = &adev->gfx.compute_ring[i];
5149 		r = amdgpu_ring_test_ib(ring, tmo);
5150 		if (r)
5151 			return true;
5152 	}
5153 
5154 	return false;
5155 }
5156 
5157 static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
5158 {
5159 	struct amdgpu_device *adev = ip_block->adev;
5160 	/* GFX soft reset also impacts MES, so MES needs to be resumed
5161 	 * after a GFX soft reset
5162 	 */
5163 	return amdgpu_mes_resume(adev);
5164 }
5165 
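/*
 * Sample the 64-bit GPU clock counter. The HI half is read before and
 * after LO so a carry between the two 32-bit reads can be detected and
 * LO re-read. VFs sample the MES MTIME counter instead, presumably
 * because the SMUIO TSC registers are not accessible from a VF.
 */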
5166 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5167 {
5168 	uint64_t clock;
5169 	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
5170 
5171 	if (amdgpu_sriov_vf(adev)) {
5172 		amdgpu_gfx_off_ctrl(adev, false);
5173 		mutex_lock(&adev->gfx.gpu_clock_mutex);
5174 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5175 		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5176 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5177 		if (clock_counter_hi_pre != clock_counter_hi_after)
5178 			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5179 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
5180 		amdgpu_gfx_off_ctrl(adev, true);
5181 	} else {
5182 		preempt_disable();
5183 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5184 		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5185 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5186 		if (clock_counter_hi_pre != clock_counter_hi_after)
5187 			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5188 		preempt_enable();
5189 	}
5190 	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
5191 
5192 	return clock;
5193 }
5194 
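/*
 * Program the GDS, GWS and OA allocations for @vmid by writing the
 * per-VMID base/size registers through the ring.
 */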
5195 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5196 					   uint32_t vmid,
5197 					   uint32_t gds_base, uint32_t gds_size,
5198 					   uint32_t gws_base, uint32_t gws_size,
5199 					   uint32_t oa_base, uint32_t oa_size)
5200 {
5201 	struct amdgpu_device *adev = ring->adev;
5202 
5203 	/* GDS Base */
5204 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5205 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
5206 				    gds_base);
5207 
5208 	/* GDS Size */
5209 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5210 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
5211 				    gds_size);
5212 
5213 	/* GWS */
5214 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5215 				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
5216 				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5217 
5218 	/* OA */
5219 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5220 				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
5221 				    (1 << (oa_size + oa_base)) - (1 << oa_base));
5222 }
5223 
5224 static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
5225 {
5226 	struct amdgpu_device *adev = ip_block->adev;
5227 
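	/*
	 * amdgpu_user_queue: -1/0 = kernel queues only, 1 = both kernel
	 * and user queues, 2 = user queues only (kernel queues disabled).
	 */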
5228 	switch (amdgpu_user_queue) {
5229 	case -1:
5230 	case 0:
5231 	default:
5232 		adev->gfx.disable_kq = false;
5233 		adev->gfx.disable_uq = true;
5234 		break;
5235 	case 1:
5236 		adev->gfx.disable_kq = false;
5237 		adev->gfx.disable_uq = false;
5238 		break;
5239 	case 2:
5240 		adev->gfx.disable_kq = true;
5241 		adev->gfx.disable_uq = false;
5242 		break;
5243 	}
5244 
5245 	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
5246 
5247 	if (adev->gfx.disable_kq) {
5248 		/* We need one GFX ring temporarily to set up
5249 		 * the clear state.
5250 		 */
5251 		adev->gfx.num_gfx_rings = 1;
5252 		adev->gfx.num_compute_rings = 0;
5253 	} else {
5254 		adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
5255 		adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5256 						  AMDGPU_MAX_COMPUTE_RINGS);
5257 	}
5258 
5259 	gfx_v11_0_set_kiq_pm4_funcs(adev);
5260 	gfx_v11_0_set_ring_funcs(adev);
5261 	gfx_v11_0_set_irq_funcs(adev);
5262 	gfx_v11_0_set_gds_init(adev);
5263 	gfx_v11_0_set_rlc_funcs(adev);
5264 	gfx_v11_0_set_mqd_funcs(adev);
5265 	gfx_v11_0_set_imu_funcs(adev);
5266 
5267 	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
5268 
5269 	return gfx_v11_0_init_microcode(adev);
5270 }
5271 
5272 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
5273 {
5274 	struct amdgpu_device *adev = ip_block->adev;
5275 	int r;
5276 
5277 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5278 	if (r)
5279 		return r;
5280 
5281 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5282 	if (r)
5283 		return r;
5284 
5285 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
5286 	if (r)
5287 		return r;
5288 
5289 	r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
5290 	if (r)
5291 		return r;
5292 
5293 	return 0;
5294 }
5295 
5296 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
5297 {
5298 	uint32_t rlc_cntl;
5299 
5300 	/* report whether the RLC F32 core is enabled */
5301 	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
5302 	return REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32) ? true : false;
5303 }
5304 
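/*
 * Request RLC safe mode and wait for the RLC to acknowledge by
 * clearing the CMD field, so that CG/PG related registers can be
 * touched without racing the RLC.
 */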
5305 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
5306 {
5307 	uint32_t data;
5308 	unsigned i;
5309 
5310 	data = RLC_SAFE_MODE__CMD_MASK;
5311 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5312 
5313 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
5314 
5315 	/* wait for RLC_SAFE_MODE */
5316 	for (i = 0; i < adev->usec_timeout; i++) {
5317 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
5318 				   RLC_SAFE_MODE, CMD))
5319 			break;
5320 		udelay(1);
5321 	}
5322 }
5323 
5324 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
5325 {
5326 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
5327 }
5328 
5329 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
5330 				      bool enable)
5331 {
5332 	uint32_t def, data;
5333 
5334 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
5335 		return;
5336 
5337 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5338 
5339 	if (enable)
5340 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5341 	else
5342 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5343 
5344 	if (def != data)
5345 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5346 }
5347 
5348 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
5349 				       bool enable)
5350 {
5351 	uint32_t def, data;
5352 
5353 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
5354 		return;
5355 
5356 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5357 
5358 	if (enable)
5359 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5360 	else
5361 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5362 
5363 	if (def != data)
5364 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5365 }
5366 
5367 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
5368 					   bool enable)
5369 {
5370 	uint32_t def, data;
5371 
5372 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
5373 		return;
5374 
5375 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5376 
5377 	if (enable)
5378 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5379 	else
5380 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5381 
5382 	if (def != data)
5383 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5384 }
5385 
5386 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5387 						       bool enable)
5388 {
5389 	uint32_t data, def;
5390 
5391 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
5392 		return;
5393 
5394 	/* It is disabled by HW by default */
5395 	if (enable) {
5396 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5397 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
5398 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5399 
5400 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5401 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5402 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5403 
5404 			if (def != data)
5405 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5406 		}
5407 	} else {
5408 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5409 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5410 
5411 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5412 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5413 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5414 
5415 			if (def != data)
5416 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5417 		}
5418 	}
5419 }
5420 
5421 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5422 						       bool enable)
5423 {
5424 	uint32_t def, data;
5425 
5426 	if (!(adev->cg_flags &
5427 	      (AMD_CG_SUPPORT_GFX_CGCG |
5428 	      AMD_CG_SUPPORT_GFX_CGLS |
5429 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
5430 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
5431 		return;
5432 
5433 	if (enable) {
5434 		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5435 
5436 		/* unset CGCG override */
5437 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5438 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5439 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5440 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5441 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
5442 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5443 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5444 
5445 		/* update CGCG override bits */
5446 		if (def != data)
5447 			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5448 
5449 		/* enable cgcg FSM(0x0000363F) */
5450 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5451 
5452 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5453 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
5454 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5455 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5456 		}
5457 
5458 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5459 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
5460 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5461 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5462 		}
5463 
5464 		if (def != data)
5465 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5466 
5467 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5468 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5469 
5470 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5471 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
5472 			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5473 				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5474 		}
5475 
5476 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5477 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
5478 			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5479 				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5480 		}
5481 
5482 		if (def != data)
5483 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5484 
5485 		/* set IDLE_POLL_COUNT(0x00900100) */
5486 		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
5487 
5488 		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
5489 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5490 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5491 
5492 		if (def != data)
5493 			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
5494 
5495 		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5496 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5497 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5498 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5499 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5500 		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
5501 
5502 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5503 		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5504 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5505 
5506 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5507 		if (adev->sdma.num_instances > 1) {
5508 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5509 			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5510 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5511 		}
5512 	} else {
5513 		/* Program RLC_CGCG_CGLS_CTRL */
5514 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5515 
5516 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5517 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5518 
5519 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5520 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5521 
5522 		if (def != data)
5523 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5524 
5525 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5526 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5527 
5528 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5529 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5530 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5531 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5532 
5533 		if (def != data)
5534 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5535 
5536 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5537 		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5538 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5539 
5540 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5541 		if (adev->sdma.num_instances > 1) {
5542 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5543 			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5544 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5545 		}
5546 	}
5547 }
5548 
5549 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5550 					    bool enable)
5551 {
5552 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5553 
5554 	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5555 
5556 	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5557 
5558 	gfx_v11_0_update_repeater_fgcg(adev, enable);
5559 
5560 	gfx_v11_0_update_sram_fgcg(adev, enable);
5561 
5562 	gfx_v11_0_update_perf_clk(adev, enable);
5563 
5564 	if (adev->cg_flags &
5565 	    (AMD_CG_SUPPORT_GFX_MGCG |
5566 	     AMD_CG_SUPPORT_GFX_CGLS |
5567 	     AMD_CG_SUPPORT_GFX_CGCG |
5568 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
5569 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
5570 		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
5571 
5572 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5573 
5574 	return 0;
5575 }
5576 
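/*
 * Switch the RLC SPM (streaming performance monitor) to the given VMID.
 * GFXOFF is disabled around the register access; in SR-IOV one-VF mode
 * the new value is additionally emitted on the submitting ring.
 */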
5577 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
5578 		struct amdgpu_ring *ring, unsigned vmid)
5579 {
5580 	u32 reg, pre_data, data;
5581 
5582 	amdgpu_gfx_off_ctrl(adev, false);
5583 	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
5584 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
5585 		pre_data = RREG32_NO_KIQ(reg);
5586 	else
5587 		pre_data = RREG32(reg);
5588 
5589 	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
5590 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5591 
5592 	if (pre_data != data) {
5593 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
5594 			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5595 		} else
5596 			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
5597 	}
5598 	amdgpu_gfx_off_ctrl(adev, true);
5599 
5600 	if (ring
5601 		&& amdgpu_sriov_is_pp_one_vf(adev)
5602 		&& (pre_data != data)
5603 		&& ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
5604 			|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
5605 		amdgpu_ring_emit_wreg(ring, reg, data);
5606 	}
5607 }
5608 
5609 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5610 	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5611 	.set_safe_mode = gfx_v11_0_set_safe_mode,
5612 	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
5613 	.init = gfx_v11_0_rlc_init,
5614 	.get_csb_size = gfx_v11_0_get_csb_size,
5615 	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
5616 	.resume = gfx_v11_0_rlc_resume,
5617 	.stop = gfx_v11_0_rlc_stop,
5618 	.reset = gfx_v11_0_rlc_reset,
5619 	.start = gfx_v11_0_rlc_start,
5620 	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
5621 };
5622 
5623 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5624 {
5625 	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5626 
5627 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5628 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5629 	else
5630 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5631 
5632 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5633 
5634 	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5635 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5636 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5637 		case IP_VERSION(11, 0, 1):
5638 		case IP_VERSION(11, 0, 4):
5639 		case IP_VERSION(11, 5, 0):
5640 		case IP_VERSION(11, 5, 1):
5641 		case IP_VERSION(11, 5, 2):
5642 		case IP_VERSION(11, 5, 3):
5643 			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5644 			break;
5645 		default:
5646 			break;
5647 		}
5648 	}
5649 }
5650 
5651 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5652 {
5653 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5654 
5655 	gfx_v11_cntl_power_gating(adev, enable);
5656 
5657 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5658 }
5659 
5660 static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5661 					   enum amd_powergating_state state)
5662 {
5663 	struct amdgpu_device *adev = ip_block->adev;
5664 	bool enable = (state == AMD_PG_STATE_GATE);
5665 
5666 	if (amdgpu_sriov_vf(adev))
5667 		return 0;
5668 
5669 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5670 	case IP_VERSION(11, 0, 0):
5671 	case IP_VERSION(11, 0, 2):
5672 	case IP_VERSION(11, 0, 3):
5673 		amdgpu_gfx_off_ctrl(adev, enable);
5674 		break;
5675 	case IP_VERSION(11, 0, 1):
5676 	case IP_VERSION(11, 0, 4):
5677 	case IP_VERSION(11, 5, 0):
5678 	case IP_VERSION(11, 5, 1):
5679 	case IP_VERSION(11, 5, 2):
5680 	case IP_VERSION(11, 5, 3):
5681 		if (!enable)
5682 			amdgpu_gfx_off_ctrl(adev, false);
5683 
5684 		gfx_v11_cntl_pg(adev, enable);
5685 
5686 		if (enable)
5687 			amdgpu_gfx_off_ctrl(adev, true);
5688 
5689 		break;
5690 	default:
5691 		break;
5692 	}
5693 
5694 	return 0;
5695 }
5696 
5697 static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5698 					  enum amd_clockgating_state state)
5699 {
5700 	struct amdgpu_device *adev = ip_block->adev;
5701 
5702 	if (amdgpu_sriov_vf(adev))
5703 		return 0;
5704 
5705 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5706 	case IP_VERSION(11, 0, 0):
5707 	case IP_VERSION(11, 0, 1):
5708 	case IP_VERSION(11, 0, 2):
5709 	case IP_VERSION(11, 0, 3):
5710 	case IP_VERSION(11, 0, 4):
5711 	case IP_VERSION(11, 5, 0):
5712 	case IP_VERSION(11, 5, 1):
5713 	case IP_VERSION(11, 5, 2):
5714 	case IP_VERSION(11, 5, 3):
5715 		gfx_v11_0_update_gfx_clock_gating(adev,
5716 						  state == AMD_CG_STATE_GATE);
5717 		break;
5718 	default:
5719 		break;
5720 	}
5721 
5722 	return 0;
5723 }
5724 
5725 static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5726 {
5727 	struct amdgpu_device *adev = ip_block->adev;
5728 	int data;
5729 
5730 	/* AMD_CG_SUPPORT_GFX_MGCG */
5731 	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5732 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5733 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5734 
5735 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
5736 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5737 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5738 
5739 	/* AMD_CG_SUPPORT_GFX_FGCG */
5740 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5741 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
5742 
5743 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
5744 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5745 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5746 
5747 	/* AMD_CG_SUPPORT_GFX_CGCG */
5748 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5749 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5750 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5751 
5752 	/* AMD_CG_SUPPORT_GFX_CGLS */
5753 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5754 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5755 
5756 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5757 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5758 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5759 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5760 
5761 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5762 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5763 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5764 }
5765 
5766 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5767 {
5768 	/* gfx11 is 32bit rptr */
5769 	return *(uint32_t *)ring->rptr_cpu_addr;
5770 }
5771 
5772 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5773 {
5774 	struct amdgpu_device *adev = ring->adev;
5775 	u64 wptr;
5776 
5777 	/* XXX check if swapping is necessary on BE */
5778 	if (ring->use_doorbell) {
5779 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5780 	} else {
5781 		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5782 		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5783 	}
5784 
5785 	return wptr;
5786 }
5787 
5788 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5789 {
5790 	struct amdgpu_device *adev = ring->adev;
5791 
5792 	if (ring->use_doorbell) {
5793 		/* XXX check if swapping is necessary on BE */
5794 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5795 			     ring->wptr);
5796 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5797 	} else {
5798 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5799 			     lower_32_bits(ring->wptr));
5800 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5801 			     upper_32_bits(ring->wptr));
5802 	}
5803 }
5804 
5805 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5806 {
5807 	/* gfx11 hardware is 32bit rptr */
5808 	return *(uint32_t *)ring->rptr_cpu_addr;
5809 }
5810 
5811 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5812 {
5813 	u64 wptr;
5814 
5815 	/* XXX check if swapping is necessary on BE */
5816 	if (ring->use_doorbell)
5817 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5818 	else
5819 		BUG();
5820 	return wptr;
5821 }
5822 
5823 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5824 {
5825 	struct amdgpu_device *adev = ring->adev;
5826 
5827 	/* XXX check if swapping is necessary on BE */
5828 	if (ring->use_doorbell) {
5829 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5830 			     ring->wptr);
5831 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5832 	} else {
5833 		BUG(); /* only DOORBELL method supported on gfx11 now */
5834 	}
5835 }
5836 
5837 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5838 {
5839 	struct amdgpu_device *adev = ring->adev;
5840 	u32 ref_and_mask, reg_mem_engine;
5841 
5842 	if (!adev->gfx.funcs->get_hdp_flush_mask) {
5843 		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
5844 		return;
5845 	}
5846 
5847 	adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
5848 	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5849 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5850 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5851 			       ref_and_mask, ref_and_mask, 0x20);
5852 }
5853 
5854 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5855 				       struct amdgpu_job *job,
5856 				       struct amdgpu_ib *ib,
5857 				       uint32_t flags)
5858 {
5859 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5860 	u32 header, control = 0;
5861 
5862 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5863 
5864 	control |= ib->length_dw | (vmid << 24);
5865 
5866 	if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5867 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5868 
5869 		if (flags & AMDGPU_IB_PREEMPTED)
5870 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5871 
5872 		if (vmid && !ring->adev->gfx.rs64_enable)
5873 			gfx_v11_0_ring_emit_de_meta(ring,
5874 				!amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
5875 	}
5876 
5877 	amdgpu_ring_write(ring, header);
5878 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5879 	amdgpu_ring_write(ring,
5880 #ifdef __BIG_ENDIAN
5881 		(2 << 0) |
5882 #endif
5883 		lower_32_bits(ib->gpu_addr));
5884 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5885 	amdgpu_ring_write(ring, control);
5886 }
5887 
5888 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5889 					   struct amdgpu_job *job,
5890 					   struct amdgpu_ib *ib,
5891 					   uint32_t flags)
5892 {
5893 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5894 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5895 
5896 	/* Currently there is a high likelihood of a wave ID mismatch
5897 	 * between ME and GDS, leading to a hw deadlock, because ME generates
5898 	 * different wave IDs than the GDS expects. This situation happens
5899 	 * randomly when at least 5 compute pipes use GDS ordered append.
5900 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5901 	 * Those are probably bugs somewhere else in the kernel driver.
5902 	 *
5903 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5904 	 * GDS to 0 for this ring (me/pipe).
5905 	 */
5906 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5907 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5908 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5909 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5910 	}
5911 
5912 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5913 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5914 	amdgpu_ring_write(ring,
5915 #ifdef __BIG_ENDIAN
5916 				(2 << 0) |
5917 #endif
5918 				lower_32_bits(ib->gpu_addr));
5919 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5920 	amdgpu_ring_write(ring, control);
5921 }
5922 
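/*
 * Emit a RELEASE_MEM fence: flush and invalidate the GL2/metadata
 * caches, write the 32- or 64-bit sequence number to @addr and
 * optionally raise an interrupt.
 */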
5923 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5924 				     u64 seq, unsigned flags)
5925 {
5926 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5927 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5928 
5929 	/* RELEASE_MEM - flush caches, send int */
5930 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5931 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5932 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5933 				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
5934 				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5935 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5936 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5937 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5938 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5939 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5940 
5941 	/*
5942 	 * the address should be Qword aligned for a 64bit write, Dword
5943 	 * aligned if only the low 32bit data is sent (data high discarded)
5944 	 */
5945 	if (write64bit)
5946 		BUG_ON(addr & 0x7);
5947 	else
5948 		BUG_ON(addr & 0x3);
5949 	amdgpu_ring_write(ring, lower_32_bits(addr));
5950 	amdgpu_ring_write(ring, upper_32_bits(addr));
5951 	amdgpu_ring_write(ring, lower_32_bits(seq));
5952 	amdgpu_ring_write(ring, upper_32_bits(seq));
5953 	amdgpu_ring_write(ring, 0);
5954 }
5955 
5956 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5957 {
5958 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5959 	uint32_t seq = ring->fence_drv.sync_seq;
5960 	uint64_t addr = ring->fence_drv.gpu_addr;
5961 
5962 	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5963 			       upper_32_bits(addr), seq, 0xffffffff, 4);
5964 }
5965 
5966 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5967 				   uint16_t pasid, uint32_t flush_type,
5968 				   bool all_hub, uint8_t dst_sel)
5969 {
5970 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5971 	amdgpu_ring_write(ring,
5972 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5973 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5974 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5975 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5976 }
5977 
5978 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5979 					 unsigned vmid, uint64_t pd_addr)
5980 {
5981 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5982 
5983 	/* compute doesn't have PFP */
5984 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5985 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5986 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5987 		amdgpu_ring_write(ring, 0x0);
5988 	}
5989 
5990 	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
5991 	 * changed in any way.
5992 	 */
5993 	ring->set_q_mode_offs = 0;
5994 	ring->set_q_mode_ptr = NULL;
5995 }
5996 
5997 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5998 					  u64 seq, unsigned int flags)
5999 {
6000 	struct amdgpu_device *adev = ring->adev;
6001 
6002 	/* we only allocate 32bit for each seq wb address */
6003 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
6004 
6005 	/* write fence seq to the "addr" */
6006 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6007 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6008 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6009 	amdgpu_ring_write(ring, lower_32_bits(addr));
6010 	amdgpu_ring_write(ring, upper_32_bits(addr));
6011 	amdgpu_ring_write(ring, lower_32_bits(seq));
6012 
6013 	if (flags & AMDGPU_FENCE_FLAG_INT) {
6014 		/* set register to trigger INT */
6015 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6016 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6017 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6018 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
6019 		amdgpu_ring_write(ring, 0);
6020 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6021 	}
6022 }
6023 
6024 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
6025 					 uint32_t flags)
6026 {
6027 	uint32_t dw2 = 0;
6028 
6029 	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
6030 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6031 		/* set load_global_config & load_global_uconfig */
6032 		dw2 |= 0x8001;
6033 		/* set load_cs_sh_regs */
6034 		dw2 |= 0x01000000;
6035 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
6036 		dw2 |= 0x10002;
6037 	}
6038 
6039 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6040 	amdgpu_ring_write(ring, dw2);
6041 	amdgpu_ring_write(ring, 0);
6042 }
6043 
6044 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
6045 						   uint64_t addr)
6046 {
6047 	unsigned ret;
6048 
6049 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6050 	amdgpu_ring_write(ring, lower_32_bits(addr));
6051 	amdgpu_ring_write(ring, upper_32_bits(addr));
6052 	/* discard following DWs if *cond_exec_gpu_addr==0 */
6053 	amdgpu_ring_write(ring, 0);
6054 	ret = ring->wptr & ring->buf_mask;
6055 	/* patch dummy value later */
6056 	amdgpu_ring_write(ring, 0);
6057 
6058 	return ret;
6059 }
6060 
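/*
 * Emit the SET_Q_PREEMPTION_MODE setup for the shadow/CSA/GDS buffers.
 * The conditional NOP trick documented below lets redundant state
 * save/restore be skipped between submissions.
 */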
6061 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
6062 					   u64 shadow_va, u64 csa_va,
6063 					   u64 gds_va, bool init_shadow,
6064 					   int vmid)
6065 {
6066 	struct amdgpu_device *adev = ring->adev;
6067 	unsigned int offs, end;
6068 
6069 	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
6070 		return;
6071 
6072 	/*
6073 	 * The logic here isn't easy to understand because we need to keep state
6074 	 * across multiple executions of the function as well as between the
6075 	 * CPU and GPU. The general idea is that the newly written GPU command
6076 	 * is conditional on the previous one and is only executed if really
6077 	 * necessary.
6078 	 */
6079 
6080 	/*
6081 	 * The dw in the NOP controls if the next SET_Q_MODE packet should be
6082 	 * executed or not. Reserve 64bits just to be on the save side.
6083 	 */
6084 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
6085 	offs = ring->wptr & ring->buf_mask;
6086 
6087 	/*
6088 	 * We start with skipping the prefix SET_Q_MODE and always executing
6089 	 * the postfix SET_Q_MODE packet. This is changed below with a
6090 	 * WRITE_DATA command once the postfix has executed.
6091 	 */
6092 	amdgpu_ring_write(ring, shadow_va ? 1 : 0);
6093 	amdgpu_ring_write(ring, 0);
6094 
6095 	if (ring->set_q_mode_offs) {
6096 		uint64_t addr;
6097 
6098 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
6099 		addr += ring->set_q_mode_offs << 2;
6100 		end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr);
6101 	}
6102 
6103 	/*
6104 	 * When the postfix SET_Q_MODE packet executes we need to make sure that the
6105 	 * next prefix SET_Q_MODE packet executes as well.
6106 	 */
6107 	if (!shadow_va) {
6108 		uint64_t addr;
6109 
6110 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
6111 		addr += offs << 2;
6112 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6113 		amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
6114 		amdgpu_ring_write(ring, lower_32_bits(addr));
6115 		amdgpu_ring_write(ring, upper_32_bits(addr));
6116 		amdgpu_ring_write(ring, 0x1);
6117 	}
6118 
6119 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
6120 	amdgpu_ring_write(ring, lower_32_bits(shadow_va));
6121 	amdgpu_ring_write(ring, upper_32_bits(shadow_va));
6122 	amdgpu_ring_write(ring, lower_32_bits(gds_va));
6123 	amdgpu_ring_write(ring, upper_32_bits(gds_va));
6124 	amdgpu_ring_write(ring, lower_32_bits(csa_va));
6125 	amdgpu_ring_write(ring, upper_32_bits(csa_va));
6126 	amdgpu_ring_write(ring, shadow_va ?
6127 			  PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
6128 	amdgpu_ring_write(ring, init_shadow ?
6129 			  PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
6130 
6131 	if (ring->set_q_mode_offs)
6132 		amdgpu_ring_patch_cond_exec(ring, end);
6133 
6134 	if (shadow_va) {
6135 		uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;
6136 
6137 		/*
6138 		 * If the tokens match try to skip the last postfix SET_Q_MODE
6139 		 * packet to avoid saving/restoring the state all the time.
6140 		 */
6141 		if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
6142 			*ring->set_q_mode_ptr = 0;
6143 
6144 		ring->set_q_mode_token = token;
6145 	} else {
6146 		ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs];
6147 	}
6148 
6149 	ring->set_q_mode_offs = offs;
6150 }
6151 
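/*
 * Preempt the IB currently executing on @ring: submit an unmap-queues
 * request with PREEMPT_QUEUES_NO_UNMAP through the KIQ and poll the
 * trailing fence until the CP acknowledges the preemption.
 */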
6152 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
6153 {
6154 	int i, r = 0;
6155 	struct amdgpu_device *adev = ring->adev;
6156 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
6157 	struct amdgpu_ring *kiq_ring = &kiq->ring;
6158 	unsigned long flags;
6159 
6160 	if (adev->enable_mes)
6161 		return -EINVAL;
6162 
6163 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
6164 		return -EINVAL;
6165 
6166 	spin_lock_irqsave(&kiq->ring_lock, flags);
6167 
6168 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
6169 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
6170 		return -ENOMEM;
6171 	}
6172 
6173 	/* assert preemption condition */
6174 	amdgpu_ring_set_preempt_cond_exec(ring, false);
6175 
6176 	/* assert IB preemption, emit the trailing fence */
6177 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
6178 				   ring->trail_fence_gpu_addr,
6179 				   ++ring->trail_seq);
6180 	amdgpu_ring_commit(kiq_ring);
6181 
6182 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
6183 
6184 	/* poll the trailing fence */
6185 	for (i = 0; i < adev->usec_timeout; i++) {
6186 		if (ring->trail_seq ==
6187 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
6188 			break;
6189 		udelay(1);
6190 	}
6191 
6192 	if (i >= adev->usec_timeout) {
6193 		r = -EINVAL;
6194 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
6195 	}
6196 
6197 	/* deassert preemption condition */
6198 	amdgpu_ring_set_preempt_cond_exec(ring, true);
6199 	return r;
6200 }
6201 
6202 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
6203 {
6204 	struct amdgpu_device *adev = ring->adev;
6205 	struct v10_de_ib_state de_payload = {0};
6206 	uint64_t offset, gds_addr, de_payload_gpu_addr;
6207 	void *de_payload_cpu_addr;
6208 	int cnt;
6209 
6210 	offset = offsetof(struct v10_gfx_meta_data, de_payload);
6211 	de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
6212 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
6213 
6214 	gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
6215 			 AMDGPU_CSA_SIZE - adev->gds.gds_size,
6216 			 PAGE_SIZE);
6217 
6218 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
6219 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
6220 
6221 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
6222 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
6223 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
6224 				 WRITE_DATA_DST_SEL(8) |
6225 				 WR_CONFIRM) |
6226 				 WRITE_DATA_CACHE_POLICY(0));
6227 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
6228 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
6229 
6230 	if (resume)
6231 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
6232 					   sizeof(de_payload) >> 2);
6233 	else
6234 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
6235 					   sizeof(de_payload) >> 2);
6236 }
6237 
6238 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
6239 				    bool secure)
6240 {
6241 	uint32_t v = secure ? FRAME_TMZ : 0;
6242 
6243 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
6244 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
6245 }
6246 
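/*
 * Read a register through the CP: COPY_DATA from the register (src_sel 0)
 * into GPU-visible write-back memory (dst_sel 5) with write confirmation.
 * Only wired up for the KIQ ring in this file.
 */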
6247 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6248 				     uint32_t reg_val_offs)
6249 {
6250 	struct amdgpu_device *adev = ring->adev;
6251 
6252 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6253 	amdgpu_ring_write(ring, 0 |	/* src: register */
6254 				(5 << 8) |	/* dst: memory */
6255 				(1 << 20));	/* write confirm */
6256 	amdgpu_ring_write(ring, reg);
6257 	amdgpu_ring_write(ring, 0);
6258 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6259 				reg_val_offs * 4));
6260 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6261 				reg_val_offs * 4));
6262 }
6263 
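/*
 * Write a register through the CP. The WRITE_DATA control bits depend on
 * the ring type: GFX routes the write through the PFP (engine_sel 1) with
 * write confirm, the KIQ uses non-incrementing addressing, and everything
 * else just requests a write confirm.
 */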
6264 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6265 				   uint32_t val)
6266 {
6267 	uint32_t cmd = 0;
6268 
6269 	switch (ring->funcs->type) {
6270 	case AMDGPU_RING_TYPE_GFX:
6271 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6272 		break;
6273 	case AMDGPU_RING_TYPE_KIQ:
6274 		cmd = (1 << 16); /* no inc addr */
6275 		break;
6276 	default:
6277 		cmd = WR_CONFIRM;
6278 		break;
6279 	}
6280 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6281 	amdgpu_ring_write(ring, cmd);
6282 	amdgpu_ring_write(ring, reg);
6283 	amdgpu_ring_write(ring, 0);
6284 	amdgpu_ring_write(ring, val);
6285 }
6286 
6287 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
6288 					uint32_t val, uint32_t mask)
6289 {
6290 	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
6291 }
6292 
6293 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
6294 						   uint32_t reg0, uint32_t reg1,
6295 						   uint32_t ref, uint32_t mask)
6296 {
6297 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6298 
6299 	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
6300 			       ref, mask, 0x20);
6301 }
6302 
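/*
 * Enable or disable EOP interrupts for a gfx pipe. Only ME0 pipes 0/1
 * exist for gfx; both the TIME_STAMP and GENERIC0 interrupt enables are
 * toggled together in the pipe's CP_INT_CNTL_RING* register.
 */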
6303 static void
6304 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6305 				      uint32_t me, uint32_t pipe,
6306 				      enum amdgpu_interrupt_state state)
6307 {
6308 	uint32_t cp_int_cntl, cp_int_cntl_reg;
6309 
6310 	if (!me) {
6311 		switch (pipe) {
6312 		case 0:
6313 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
6314 			break;
6315 		case 1:
6316 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
6317 			break;
6318 		default:
6319 			DRM_DEBUG("invalid pipe %d\n", pipe);
6320 			return;
6321 		}
6322 	} else {
6323 		DRM_DEBUG("invalid me %d\n", me);
6324 		return;
6325 	}
6326 
6327 	switch (state) {
6328 	case AMDGPU_IRQ_STATE_DISABLE:
6329 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6330 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6331 					    TIME_STAMP_INT_ENABLE, 0);
6332 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6333 					    GENERIC0_INT_ENABLE, 0);
6334 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6335 		break;
6336 	case AMDGPU_IRQ_STATE_ENABLE:
6337 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6338 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6339 					    TIME_STAMP_INT_ENABLE, 1);
6340 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6341 					    GENERIC0_INT_ENABLE, 1);
6342 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6343 		break;
6344 	default:
6345 		break;
6346 	}
6347 }
6348 
6349 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6350 						     int me, int pipe,
6351 						     enum amdgpu_interrupt_state state)
6352 {
6353 	u32 mec_int_cntl, mec_int_cntl_reg;
6354 
6355 	/*
6356 	 * amdgpu controls only the first MEC. That's why this function only
6357 	 * handles the setting of interrupts for this specific MEC. All other
6358 	 * pipes' interrupts are set by amdkfd.
6359 	 */
6360 
6361 	if (me == 1) {
6362 		switch (pipe) {
6363 		case 0:
6364 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6365 			break;
6366 		case 1:
6367 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
6368 			break;
6369 		case 2:
6370 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
6371 			break;
6372 		case 3:
6373 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
6374 			break;
6375 		default:
6376 			DRM_DEBUG("invalid pipe %d\n", pipe);
6377 			return;
6378 		}
6379 	} else {
6380 		DRM_DEBUG("invalid me %d\n", me);
6381 		return;
6382 	}
6383 
6384 	switch (state) {
6385 	case AMDGPU_IRQ_STATE_DISABLE:
6386 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6387 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6388 					     TIME_STAMP_INT_ENABLE, 0);
6389 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6390 					     GENERIC0_INT_ENABLE, 0);
6391 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6392 		break;
6393 	case AMDGPU_IRQ_STATE_ENABLE:
6394 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6395 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6396 					     TIME_STAMP_INT_ENABLE, 1);
6397 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6398 					     GENERIC0_INT_ENABLE, 1);
6399 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6400 		break;
6401 	default:
6402 		break;
6403 	}
6404 }
6405 
6406 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6407 					    struct amdgpu_irq_src *src,
6408 					    unsigned type,
6409 					    enum amdgpu_interrupt_state state)
6410 {
6411 	switch (type) {
6412 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6413 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
6414 		break;
6415 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
6416 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
6417 		break;
6418 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6419 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6420 		break;
6421 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6422 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6423 		break;
6424 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6425 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6426 		break;
6427 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6428 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6429 		break;
6430 	default:
6431 		break;
6432 	}
6433 	return 0;
6434 }
6435 
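/*
 * CP EOP interrupt handler. With MES enabled, src_data[0] carries a user
 * queue doorbell offset and is used to look up the matching user-queue
 * fence driver in adev->userq_xa. Otherwise ring_id encodes me ([3:2]),
 * pipe ([1:0]) and queue ([6:4]), and the fence of the matching kernel
 * ring is processed.
 */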
6436 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
6437 			     struct amdgpu_irq_src *source,
6438 			     struct amdgpu_iv_entry *entry)
6439 {
6440 	u32 doorbell_offset = entry->src_data[0];
6441 	u8 me_id, pipe_id, queue_id;
6442 	struct amdgpu_ring *ring;
6443 	int i;
6444 
6445 	DRM_DEBUG("IH: CP EOP\n");
6446 
6447 	if (adev->enable_mes && doorbell_offset) {
6448 		struct amdgpu_userq_fence_driver *fence_drv = NULL;
6449 		struct xarray *xa = &adev->userq_xa;
6450 		unsigned long flags;
6451 
6452 		xa_lock_irqsave(xa, flags);
6453 		fence_drv = xa_load(xa, doorbell_offset);
6454 		if (fence_drv)
6455 			amdgpu_userq_fence_driver_process(fence_drv);
6456 		xa_unlock_irqrestore(xa, flags);
6457 	} else {
6458 		me_id = (entry->ring_id & 0x0c) >> 2;
6459 		pipe_id = (entry->ring_id & 0x03) >> 0;
6460 		queue_id = (entry->ring_id & 0x70) >> 4;
6461 
6462 		switch (me_id) {
6463 		case 0:
6464 			if (pipe_id == 0)
6465 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6466 			else
6467 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
6468 			break;
6469 		case 1:
6470 		case 2:
6471 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6472 				ring = &adev->gfx.compute_ring[i];
6473 				/* Per-queue interrupt is supported for MEC starting from VI.
6474 				 * The interrupt can only be enabled/disabled per pipe instead
6475 				 * of per queue.
6476 				 */
6477 				if ((ring->me == me_id) &&
6478 				    (ring->pipe == pipe_id) &&
6479 				    (ring->queue == queue_id))
6480 					amdgpu_fence_process(ring);
6481 			}
6482 			break;
6483 		}
6484 	}
6485 
6486 	return 0;
6487 }
6488 
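/*
 * Toggle the PRIV_REG fault interrupt on every gfx (CPG) and compute
 * (CPC) pipe. The CP_INT_CNTL_RING0/CP_ME1_PIPE0_INT_CNTL field layouts
 * are shared by all pipes, so they are reused for every instance here.
 */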
6489 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6490 					      struct amdgpu_irq_src *source,
6491 					      unsigned int type,
6492 					      enum amdgpu_interrupt_state state)
6493 {
6494 	u32 cp_int_cntl_reg, cp_int_cntl;
6495 	int i, j;
6496 
6497 	switch (state) {
6498 	case AMDGPU_IRQ_STATE_DISABLE:
6499 	case AMDGPU_IRQ_STATE_ENABLE:
6500 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6501 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6502 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6503 
6504 				if (cp_int_cntl_reg) {
6505 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6506 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6507 								    PRIV_REG_INT_ENABLE,
6508 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6509 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6510 				}
6511 			}
6512 		}
6513 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6514 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6515 				/* MECs start at 1 */
6516 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6517 
6518 				if (cp_int_cntl_reg) {
6519 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6520 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6521 								    PRIV_REG_INT_ENABLE,
6522 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6523 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6524 				}
6525 			}
6526 		}
6527 		break;
6528 	default:
6529 		break;
6530 	}
6531 
6532 	return 0;
6533 }
6534 
6535 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6536 					    struct amdgpu_irq_src *source,
6537 					    unsigned type,
6538 					    enum amdgpu_interrupt_state state)
6539 {
6540 	u32 cp_int_cntl_reg, cp_int_cntl;
6541 	int i, j;
6542 
6543 	switch (state) {
6544 	case AMDGPU_IRQ_STATE_DISABLE:
6545 	case AMDGPU_IRQ_STATE_ENABLE:
6546 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6547 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6548 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6549 
6550 				if (cp_int_cntl_reg) {
6551 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6552 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6553 								    OPCODE_ERROR_INT_ENABLE,
6554 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6555 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6556 				}
6557 			}
6558 		}
6559 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6560 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6561 				/* MECs start at 1 */
6562 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6563 
6564 				if (cp_int_cntl_reg) {
6565 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6566 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6567 								    OPCODE_ERROR_INT_ENABLE,
6568 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6569 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6570 				}
6571 			}
6572 		}
6573 		break;
6574 	default:
6575 		break;
6576 	}
6577 	return 0;
6578 }
6579 
6580 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6581 					       struct amdgpu_irq_src *source,
6582 					       unsigned int type,
6583 					       enum amdgpu_interrupt_state state)
6584 {
6585 	u32 cp_int_cntl_reg, cp_int_cntl;
6586 	int i, j;
6587 
6588 	switch (state) {
6589 	case AMDGPU_IRQ_STATE_DISABLE:
6590 	case AMDGPU_IRQ_STATE_ENABLE:
6591 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6592 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6593 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6594 
6595 				if (cp_int_cntl_reg) {
6596 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6597 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6598 								    PRIV_INSTR_INT_ENABLE,
6599 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6600 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6601 				}
6602 			}
6603 		}
6604 		break;
6605 	default:
6606 		break;
6607 	}
6608 
6609 	return 0;
6610 }
6611 
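/*
 * Common handler for privileged register/instruction and bad-opcode
 * faults: decode me/pipe/queue from ring_id and raise a scheduler fault
 * on the matching kernel ring. Skipped entirely when kernel queues are
 * disabled.
 */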
6612 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
6613 					struct amdgpu_iv_entry *entry)
6614 {
6615 	u8 me_id, pipe_id, queue_id;
6616 	struct amdgpu_ring *ring;
6617 	int i;
6618 
6619 	me_id = (entry->ring_id & 0x0c) >> 2;
6620 	pipe_id = (entry->ring_id & 0x03) >> 0;
6621 	queue_id = (entry->ring_id & 0x70) >> 4;
6622 
6623 	if (!adev->gfx.disable_kq) {
6624 		switch (me_id) {
6625 		case 0:
6626 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
6627 				ring = &adev->gfx.gfx_ring[i];
6628 				if (ring->me == me_id && ring->pipe == pipe_id &&
6629 				    ring->queue == queue_id)
6630 					drm_sched_fault(&ring->sched);
6631 			}
6632 			break;
6633 		case 1:
6634 		case 2:
6635 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6636 				ring = &adev->gfx.compute_ring[i];
6637 				if (ring->me == me_id && ring->pipe == pipe_id &&
6638 				    ring->queue == queue_id)
6639 					drm_sched_fault(&ring->sched);
6640 			}
6641 			break;
6642 		default:
6643 			BUG();
6644 			break;
6645 		}
6646 	}
6647 }
6648 
6649 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
6650 				  struct amdgpu_irq_src *source,
6651 				  struct amdgpu_iv_entry *entry)
6652 {
6653 	DRM_ERROR("Illegal register access in command stream\n");
6654 	gfx_v11_0_handle_priv_fault(adev, entry);
6655 	return 0;
6656 }
6657 
6658 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
6659 				struct amdgpu_irq_src *source,
6660 				struct amdgpu_iv_entry *entry)
6661 {
6662 	DRM_ERROR("Illegal opcode in command stream\n");
6663 	gfx_v11_0_handle_priv_fault(adev, entry);
6664 	return 0;
6665 }
6666 
6667 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
6668 				   struct amdgpu_irq_src *source,
6669 				   struct amdgpu_iv_entry *entry)
6670 {
6671 	DRM_ERROR("Illegal instruction in command stream\n");
6672 	gfx_v11_0_handle_priv_fault(adev, entry);
6673 	return 0;
6674 }
6675 
6676 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
6677 				  struct amdgpu_irq_src *source,
6678 				  struct amdgpu_iv_entry *entry)
6679 {
6680 	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
6681 		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
6682 
6683 	return 0;
6684 }
6685 
6686 #if 0
6687 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6688 					     struct amdgpu_irq_src *src,
6689 					     unsigned int type,
6690 					     enum amdgpu_interrupt_state state)
6691 {
6692 	uint32_t tmp, target;
6693 	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
6694 
6695 	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6696 	target += ring->pipe;
6697 
6698 	switch (type) {
6699 	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
6700 		if (state == AMDGPU_IRQ_STATE_DISABLE) {
6701 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6702 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6703 					    GENERIC2_INT_ENABLE, 0);
6704 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6705 
6706 			tmp = RREG32_SOC15_IP(GC, target);
6707 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6708 					    GENERIC2_INT_ENABLE, 0);
6709 			WREG32_SOC15_IP(GC, target, tmp);
6710 		} else {
6711 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6712 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6713 					    GENERIC2_INT_ENABLE, 1);
6714 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6715 
6716 			tmp = RREG32_SOC15_IP(GC, target);
6717 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6718 					    GENERIC2_INT_ENABLE, 1);
6719 			WREG32_SOC15_IP(GC, target, tmp);
6720 		}
6721 		break;
6722 	default:
6723 		BUG(); /* KIQ only supports GENERIC2_INT for now */
6724 		break;
6725 	}
6726 	return 0;
6727 }
6728 #endif
6729 
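/*
 * Emit a full GPU cache flush: ACQUIRE_MEM over the entire address range
 * with GL2 and metadata (GLM) writeback+invalidate plus GL1, vector L0
 * (GLV), scalar L0 (GLK) and instruction L0 (GLI) invalidates.
 */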
6730 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
6731 {
6732 	const unsigned int gcr_cntl =
6733 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
6734 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
6735 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
6736 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
6737 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
6738 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
6739 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
6740 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
6741 
6742 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6743 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
6744 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
6745 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6746 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6747 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6748 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6749 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6750 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
6751 }
6752 
6753 static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
6754 {
6755 	/* Disable the pipe reset until the CPFW fully supports it. */
6756 	dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
6757 	return false;
6758 }
6759
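/*
 * Reset a gfx pipe by pulsing the PFP/ME PIPE*_RESET fields in CP_ME_CNTL
 * under SRBM selection with the RLC in safe mode, then compare the
 * restarted instruction pointer against the RS64 firmware start address
 * to judge whether the pipe came back at the expected PC.
 */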
6761 static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
6762 {
6763 	struct amdgpu_device *adev = ring->adev;
6764 	uint32_t reset_pipe = 0, clean_pipe = 0;
6765 	int r;
6766 
6767 	if (!gfx_v11_pipe_reset_support(adev))
6768 		return -EOPNOTSUPP;
6769 
6770 	gfx_v11_0_set_safe_mode(adev, 0);
6771 	mutex_lock(&adev->srbm_mutex);
6772 	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6773 
6774 	switch (ring->pipe) {
6775 	case 0:
6776 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6777 					   PFP_PIPE0_RESET, 1);
6778 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6779 					   ME_PIPE0_RESET, 1);
6780 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6781 					   PFP_PIPE0_RESET, 0);
6782 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6783 					   ME_PIPE0_RESET, 0);
6784 		break;
6785 	case 1:
6786 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6787 					   PFP_PIPE1_RESET, 1);
6788 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6789 					   ME_PIPE1_RESET, 1);
6790 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6791 					   PFP_PIPE1_RESET, 0);
6792 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6793 					   ME_PIPE1_RESET, 0);
6794 		break;
6795 	default:
6796 		break;
6797 	}
6798 
6799 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
6800 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
6801 
6802 	r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
6803 						RS64_FW_UC_START_ADDR_LO;
6804 	soc21_grbm_select(adev, 0, 0, 0, 0);
6805 	mutex_unlock(&adev->srbm_mutex);
6806 	gfx_v11_0_unset_safe_mode(adev, 0);
6807 
6808 	dev_info(adev->dev, "ring %s pipe reset to the ME firmware start PC %s\n", ring->name,
6809 			r == 0 ? "succeeded" : "failed");
6810 	/* FIXME: Sometimes the driver's cached ME firmware start PC is not accurate,
6811 	 * so the pipe reset status relies on the later gfx ring test result.
6812 	 */
6813 	return 0;
6814 }
6815 
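/*
 * Reset a kernel gfx queue after a job timeout: first try a legacy queue
 * reset through MES and fall back to a full pipe reset if that fails,
 * then reinitialize the queue and remap it via MES.
 */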
6816 static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
6817 			       unsigned int vmid,
6818 			       struct amdgpu_fence *timedout_fence)
6819 {
6820 	struct amdgpu_device *adev = ring->adev;
6821 	int r;
6822 
6823 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
6824 
6825 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false, 0);
6826 	if (r) {
6828 		dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
6829 		r = gfx_v11_reset_gfx_pipe(ring);
6830 		if (r)
6831 			return r;
6832 	}
6833 
6834 	r = gfx_v11_0_kgq_init_queue(ring, true);
6835 	if (r) {
6836 		dev_err(adev->dev, "failed to init kgq\n");
6837 		return r;
6838 	}
6839 
6840 	r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
6841 	if (r) {
6842 		dev_err(adev->dev, "failed to remap kgq\n");
6843 		return r;
6844 	}
6845 
6846 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
6847 }
6848 
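/*
 * Compute counterpart of the gfx pipe reset: RS64 parts pulse the
 * MEC_PIPE*_RESET fields in CP_MEC_RS64_CNTL, legacy parts pulse the
 * per-ME MEC_ME*_PIPE*_RESET fields in CP_MEC_CNTL.
 */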
6849 static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
6850 {
6852 	struct amdgpu_device *adev = ring->adev;
6853 	uint32_t reset_pipe = 0, clean_pipe = 0;
6854 	int r;
6855 
6856 	if (!gfx_v11_pipe_reset_support(adev))
6857 		return -EOPNOTSUPP;
6858 
6859 	gfx_v11_0_set_safe_mode(adev, 0);
6860 	mutex_lock(&adev->srbm_mutex);
6861 	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6862 
6863 	reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
6864 	clean_pipe = reset_pipe;
6865 
6866 	if (adev->gfx.rs64_enable) {
6868 		switch (ring->pipe) {
6869 		case 0:
6870 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6871 						   MEC_PIPE0_RESET, 1);
6872 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6873 						   MEC_PIPE0_RESET, 0);
6874 			break;
6875 		case 1:
6876 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6877 						   MEC_PIPE1_RESET, 1);
6878 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6879 						   MEC_PIPE1_RESET, 0);
6880 			break;
6881 		case 2:
6882 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6883 						   MEC_PIPE2_RESET, 1);
6884 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6885 						   MEC_PIPE2_RESET, 0);
6886 			break;
6887 		case 3:
6888 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6889 						   MEC_PIPE3_RESET, 1);
6890 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6891 						   MEC_PIPE3_RESET, 0);
6892 			break;
6893 		default:
6894 			break;
6895 		}
6896 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
6897 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
6898 		r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
6899 					RS64_FW_UC_START_ADDR_LO;
6900 	} else {
6901 		if (ring->me == 1) {
6902 			switch (ring->pipe) {
6903 			case 0:
6904 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6905 							   MEC_ME1_PIPE0_RESET, 1);
6906 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6907 							   MEC_ME1_PIPE0_RESET, 0);
6908 				break;
6909 			case 1:
6910 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6911 							   MEC_ME1_PIPE1_RESET, 1);
6912 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6913 							   MEC_ME1_PIPE1_RESET, 0);
6914 				break;
6915 			case 2:
6916 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6917 							   MEC_ME1_PIPE2_RESET, 1);
6918 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6919 							   MEC_ME1_PIPE2_RESET, 0);
6920 				break;
6921 			case 3:
6922 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6923 							   MEC_ME1_PIPE3_RESET, 1);
6924 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6925 							   MEC_ME1_PIPE3_RESET, 0);
6926 				break;
6927 			default:
6928 				break;
6929 			}
6930 			/* mec1 fw pc: CP_MEC1_INSTR_PNTR */
6931 		} else {
6932 			switch (ring->pipe) {
6933 			case 0:
6934 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6935 							   MEC_ME2_PIPE0_RESET, 1);
6936 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6937 							   MEC_ME2_PIPE0_RESET, 0);
6938 				break;
6939 			case 1:
6940 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6941 							   MEC_ME2_PIPE1_RESET, 1);
6942 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6943 							   MEC_ME2_PIPE1_RESET, 0);
6944 				break;
6945 			case 2:
6946 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6947 							   MEC_ME2_PIPE2_RESET, 1);
6948 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6949 							   MEC_ME2_PIPE2_RESET, 0);
6950 				break;
6951 			case 3:
6952 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6953 							   MEC_ME2_PIPE3_RESET, 1);
6954 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6955 							   MEC_ME2_PIPE3_RESET, 0);
6956 				break;
6957 			default:
6958 				break;
6959 			}
6960 			/* mec2 fw pc: CP_MEC2_INSTR_PNTR */
6961 		}
6962 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
6963 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
6964 		r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
6965 	}
6966 
6967 	soc21_grbm_select(adev, 0, 0, 0, 0);
6968 	mutex_unlock(&adev->srbm_mutex);
6969 	gfx_v11_0_unset_safe_mode(adev, 0);
6970 
6971 	dev_info(adev->dev, "ring %s pipe reset to the MEC firmware start PC %s\n", ring->name,
6972 			r == 0 ? "succeeded" : "failed");
6973 	/* FIXME: Sometimes the driver's cached MEC firmware start PC is not accurate,
6974 	 * so the pipe reset status relies on the compute ring test result.
6975 	 */
6976 	return 0;
6977 }
6978 
6979 static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
6980 			       unsigned int vmid,
6981 			       struct amdgpu_fence *timedout_fence)
6982 {
6983 	struct amdgpu_device *adev = ring->adev;
6984 	int r = 0;
6985 
6986 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
6987 
6988 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
6989 	if (r) {
6990 		dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
6991 		r = gfx_v11_0_reset_compute_pipe(ring);
6992 		if (r)
6993 			return r;
6994 	}
6995 
6996 	r = gfx_v11_0_kcq_init_queue(ring, true);
6997 	if (r) {
6998 		dev_err(adev->dev, "failed to init kcq\n");
6999 		return r;
7000 	}
7001 	r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
7002 	if (r) {
7003 		dev_err(adev->dev, "failed to remap kcq\n");
7004 		return r;
7005 	}
7006 
7007 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
7008 }
7009 
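/*
 * Print the register state captured by gfx_v11_ip_dump(): the core GC
 * list first, then per-queue compute and gfx register blocks in
 * (instance, pipe, queue) order. For MEC instances past the first, the
 * header-dump entry is reported under its ME2 register name.
 */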
7010 static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
7011 {
7012 	struct amdgpu_device *adev = ip_block->adev;
7013 	uint32_t i, j, k, reg, index = 0;
7014 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
7015 
7016 	if (!adev->gfx.ip_dump_core)
7017 		return;
7018 
7019 	for (i = 0; i < reg_count; i++)
7020 		drm_printf(p, "%-50s \t 0x%08x\n",
7021 			   gc_reg_list_11_0[i].reg_name,
7022 			   adev->gfx.ip_dump_core[i]);
7023 
7024 	/* print compute queue registers for all instances */
7025 	if (!adev->gfx.ip_dump_compute_queues)
7026 		return;
7027 
7028 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
7029 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
7030 		   adev->gfx.mec.num_mec,
7031 		   adev->gfx.mec.num_pipe_per_mec,
7032 		   adev->gfx.mec.num_queue_per_pipe);
7033 
7034 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7035 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7036 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7037 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
7038 				for (reg = 0; reg < reg_count; reg++) {
7039 					if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
7040 						drm_printf(p, "%-50s \t 0x%08x\n",
7041 							   "regCP_MEC_ME2_HEADER_DUMP",
7042 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7043 					else
7044 						drm_printf(p, "%-50s \t 0x%08x\n",
7045 							   gc_cp_reg_list_11[reg].reg_name,
7046 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7047 				}
7048 				index += reg_count;
7049 			}
7050 		}
7051 	}
7052 
7053 	/* print gfx queue registers for all instances */
7054 	if (!adev->gfx.ip_dump_gfx_queues)
7055 		return;
7056 
7057 	index = 0;
7058 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
7059 	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
7060 		   adev->gfx.me.num_me,
7061 		   adev->gfx.me.num_pipe_per_me,
7062 		   adev->gfx.me.num_queue_per_pipe);
7063 
7064 	for (i = 0; i < adev->gfx.me.num_me; i++) {
7065 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
7066 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
7067 				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
7068 				for (reg = 0; reg < reg_count; reg++) {
7069 					drm_printf(p, "%-50s \t 0x%08x\n",
7070 						   gc_gfx_queue_reg_list_11[reg].reg_name,
7071 						   adev->gfx.ip_dump_gfx_queues[index + reg]);
7072 				}
7073 				index += reg_count;
7074 			}
7075 		}
7076 	}
7077 }
7078 
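/*
 * Capture GC register state for later printing: core registers with
 * GFXOFF temporarily disabled, then per-queue compute and gfx registers
 * selected through SRBM. Compute selection starts at ME1 since ME0 is
 * the gfx engine.
 */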
7079 static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
7080 {
7081 	struct amdgpu_device *adev = ip_block->adev;
7082 	uint32_t i, j, k, reg, index = 0;
7083 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
7084 
7085 	if (!adev->gfx.ip_dump_core)
7086 		return;
7087 
7088 	amdgpu_gfx_off_ctrl(adev, false);
7089 	for (i = 0; i < reg_count; i++)
7090 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i]));
7091 	amdgpu_gfx_off_ctrl(adev, true);
7092 
7093 	/* dump compute queue registers for all instances */
7094 	if (!adev->gfx.ip_dump_compute_queues)
7095 		return;
7096 
7097 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
7098 	amdgpu_gfx_off_ctrl(adev, false);
7099 	mutex_lock(&adev->srbm_mutex);
7100 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7101 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7102 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7103 				/* ME0 is for GFX so start from 1 for CP */
7104 				soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
7105 				for (reg = 0; reg < reg_count; reg++) {
7106 					if (i &&
7107 					    gc_cp_reg_list_11[reg].reg_offset ==
7108 						    regCP_MEC_ME1_HEADER_DUMP)
7109 						adev->gfx.ip_dump_compute_queues[index + reg] =
7110 							RREG32(SOC15_REG_OFFSET(GC, 0,
7111 							       regCP_MEC_ME2_HEADER_DUMP));
7112 					else
7113 						adev->gfx.ip_dump_compute_queues[index + reg] =
7114 							RREG32(SOC15_REG_ENTRY_OFFSET(
7115 								       gc_cp_reg_list_11[reg]));
7116 				}
7117 				index += reg_count;
7118 			}
7119 		}
7120 	}
7121 	soc21_grbm_select(adev, 0, 0, 0, 0);
7122 	mutex_unlock(&adev->srbm_mutex);
7123 	amdgpu_gfx_off_ctrl(adev, true);
7124 
7125 	/* dump gfx queue registers for all instances */
7126 	if (!adev->gfx.ip_dump_gfx_queues)
7127 		return;
7128 
7129 	index = 0;
7130 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
7131 	amdgpu_gfx_off_ctrl(adev, false);
7132 	mutex_lock(&adev->srbm_mutex);
7133 	for (i = 0; i < adev->gfx.me.num_me; i++) {
7134 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
7135 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
7136 				soc21_grbm_select(adev, i, j, k, 0);
7137 
7138 				for (reg = 0; reg < reg_count; reg++) {
7139 					adev->gfx.ip_dump_gfx_queues[index + reg] =
7140 						RREG32(SOC15_REG_ENTRY_OFFSET(
7141 							gc_gfx_queue_reg_list_11[reg]));
7142 				}
7143 				index += reg_count;
7144 			}
7145 		}
7146 	}
7147 	soc21_grbm_select(adev, 0, 0, 0, 0);
7148 	mutex_unlock(&adev->srbm_mutex);
7149 	amdgpu_gfx_off_ctrl(adev, true);
7150 }
7151 
7152 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
7153 {
7154 	/* Emit the cleaner shader */
7155 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
7156 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
7157 }
7158 
7159 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring)
7160 {
7161 	amdgpu_gfx_profile_ring_begin_use(ring);
7162 
7163 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
7164 }
7165 
7166 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring)
7167 {
7168 	amdgpu_gfx_profile_ring_end_use(ring);
7169 
7170 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
7171 }
7172 
7173 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
7174 	.name = "gfx_v11_0",
7175 	.early_init = gfx_v11_0_early_init,
7176 	.late_init = gfx_v11_0_late_init,
7177 	.sw_init = gfx_v11_0_sw_init,
7178 	.sw_fini = gfx_v11_0_sw_fini,
7179 	.hw_init = gfx_v11_0_hw_init,
7180 	.hw_fini = gfx_v11_0_hw_fini,
7181 	.suspend = gfx_v11_0_suspend,
7182 	.resume = gfx_v11_0_resume,
7183 	.is_idle = gfx_v11_0_is_idle,
7184 	.wait_for_idle = gfx_v11_0_wait_for_idle,
7185 	.soft_reset = gfx_v11_0_soft_reset,
7186 	.check_soft_reset = gfx_v11_0_check_soft_reset,
7187 	.post_soft_reset = gfx_v11_0_post_soft_reset,
7188 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
7189 	.set_powergating_state = gfx_v11_0_set_powergating_state,
7190 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
7191 	.dump_ip_state = gfx_v11_ip_dump,
7192 	.print_ip_state = gfx_v11_ip_print,
7193 };
7194 
7195 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7196 	.type = AMDGPU_RING_TYPE_GFX,
7197 	.align_mask = 0xff,
7198 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7199 	.support_64bit_ptrs = true,
7200 	.secure_submission_supported = true,
7201 	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
7202 	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
7203 	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
7204 	.emit_frame_size = /* 247 dwords maximum in total if 16 IBs */
7205 		5 + /* update_spm_vmid */
7206 		5 + /* COND_EXEC */
7207 		22 + /* SET_Q_PREEMPTION_MODE */
7208 		7 + /* PIPELINE_SYNC */
7209 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7210 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7211 		4 + /* VM_FLUSH */
7212 		8 + /* FENCE for VM_FLUSH */
7213 		20 + /* GDS switch */
7214 		5 + /* COND_EXEC */
7215 		7 + /* HDP_flush */
7216 		4 + /* VGT_flush */
7217 		31 + /* DE_META */
7218 		3 + /* CNTX_CTRL */
7219 		5 + /* HDP_INVL */
7220 		22 + /* SET_Q_PREEMPTION_MODE */
7221 		8 + 8 + /* FENCE x2 */
7222 		8 + /* gfx_v11_0_emit_mem_sync */
7223 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
7224 	.emit_ib_size =	4, /* gfx_v11_0_ring_emit_ib_gfx */
7225 	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
7226 	.emit_fence = gfx_v11_0_ring_emit_fence,
7227 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
7228 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
7229 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
7230 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
7231 	.test_ring = gfx_v11_0_ring_test_ring,
7232 	.test_ib = gfx_v11_0_ring_test_ib,
7233 	.insert_nop = gfx_v11_ring_insert_nop,
7234 	.pad_ib = amdgpu_ring_generic_pad_ib,
7235 	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
7236 	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
7237 	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
7238 	.preempt_ib = gfx_v11_0_ring_preempt_ib,
7239 	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
7240 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7241 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7242 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7243 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
7244 	.reset = gfx_v11_0_reset_kgq,
7245 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
7246 	.begin_use = gfx_v11_0_ring_begin_use,
7247 	.end_use = gfx_v11_0_ring_end_use,
7248 };
7249 
7250 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
7251 	.type = AMDGPU_RING_TYPE_COMPUTE,
7252 	.align_mask = 0xff,
7253 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7254 	.support_64bit_ptrs = true,
7255 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
7256 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
7257 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
7258 	.emit_frame_size =
7259 		5 + /* update_spm_vmid */
7260 		20 + /* gfx_v11_0_ring_emit_gds_switch */
7261 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
7262 		5 + /* hdp invalidate */
7263 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
7264 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7265 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7266 		2 + /* gfx_v11_0_ring_emit_vm_flush */
7267 		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
7268 		8 + /* gfx_v11_0_emit_mem_sync */
7269 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
7270 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
7271 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
7272 	.emit_fence = gfx_v11_0_ring_emit_fence,
7273 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
7274 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
7275 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
7276 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
7277 	.test_ring = gfx_v11_0_ring_test_ring,
7278 	.test_ib = gfx_v11_0_ring_test_ib,
7279 	.insert_nop = gfx_v11_ring_insert_nop,
7280 	.pad_ib = amdgpu_ring_generic_pad_ib,
7281 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7282 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7283 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7284 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
7285 	.reset = gfx_v11_0_reset_kcq,
7286 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
7287 	.begin_use = gfx_v11_0_ring_begin_use,
7288 	.end_use = gfx_v11_0_ring_end_use,
7289 };
7290 
7291 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7292 	.type = AMDGPU_RING_TYPE_KIQ,
7293 	.align_mask = 0xff,
7294 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7295 	.support_64bit_ptrs = true,
7296 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
7297 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
7298 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
7299 	.emit_frame_size =
7300 		20 + /* gfx_v11_0_ring_emit_gds_switch */
7301 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
7302 		5 + /* hdp invalidate */
7303 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
7304 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7305 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7306 		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7307 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
7308 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
7309 	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
7310 	.test_ring = gfx_v11_0_ring_test_ring,
7311 	.test_ib = gfx_v11_0_ring_test_ib,
7312 	.insert_nop = amdgpu_ring_insert_nop,
7313 	.pad_ib = amdgpu_ring_generic_pad_ib,
7314 	.emit_rreg = gfx_v11_0_ring_emit_rreg,
7315 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7316 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7317 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7318 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
7319 };
7320 
7321 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
7322 {
7323 	int i;
7324 
7325 	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
7326 
7327 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7328 		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
7329 
7330 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7331 		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
7332 }
7333 
7334 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
7335 	.set = gfx_v11_0_set_eop_interrupt_state,
7336 	.process = gfx_v11_0_eop_irq,
7337 };
7338 
7339 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
7340 	.set = gfx_v11_0_set_priv_reg_fault_state,
7341 	.process = gfx_v11_0_priv_reg_irq,
7342 };
7343 
7344 static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
7345 	.set = gfx_v11_0_set_bad_op_fault_state,
7346 	.process = gfx_v11_0_bad_op_irq,
7347 };
7348 
7349 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
7350 	.set = gfx_v11_0_set_priv_inst_fault_state,
7351 	.process = gfx_v11_0_priv_inst_irq,
7352 };
7353 
7354 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
7355 	.process = gfx_v11_0_rlc_gc_fed_irq,
7356 };
7357 
7358 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
7359 {
7360 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7361 	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;
7362 
7363 	adev->gfx.priv_reg_irq.num_types = 1;
7364 	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
7365 
7366 	adev->gfx.bad_op_irq.num_types = 1;
7367 	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
7368 
7369 	adev->gfx.priv_inst_irq.num_types = 1;
7370 	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
7371 
7372 	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
7373 	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
7375 }
7376 
7377 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
7378 {
7379 	if (adev->flags & AMD_IS_APU)
7380 		adev->gfx.imu.mode = MISSION_MODE;
7381 	else
7382 		adev->gfx.imu.mode = DEBUG_MODE;
7383 
7384 	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
7385 }
7386 
7387 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
7388 {
7389 	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
7390 }
7391 
7392 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
7393 {
7394 	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
7395 			    adev->gfx.config.max_sh_per_se *
7396 			    adev->gfx.config.max_shader_engines;
7397 
7398 	adev->gds.gds_size = 0x1000;
7399 	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
7400 	adev->gds.gws_size = 64;
7401 	adev->gds.oa_size = 16;
7402 }
7403 
7404 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
7405 {
7406 	/* set gfx eng mqd */
7407 	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
7408 		sizeof(struct v11_gfx_mqd);
7409 	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
7410 		gfx_v11_0_gfx_mqd_init;
7411 	/* set compute eng mqd */
7412 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
7413 		sizeof(struct v11_compute_mqd);
7414 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
7415 		gfx_v11_0_compute_mqd_init;
7416 }
7417 
7418 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
7419 							  u32 bitmap)
7420 {
7421 	u32 data;
7422 
7423 	if (!bitmap)
7424 		return;
7425 
7426 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7427 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7428 
7429 	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
7430 }
7431 
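/*
 * Active WGPs in the currently selected SA: the complement of the
 * harvested (CC) and user-disabled (GC_USER) inactive-WGP bits, limited
 * to max_cu_per_sh / 2 WGPs (two CUs per WGP).
 */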
7432 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
7433 {
7434 	u32 data, wgp_bitmask;

7435 	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
7436 	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
7437 
7438 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7439 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7440 
7441 	wgp_bitmask =
7442 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
7443 
7444 	return (~data) & wgp_bitmask;
7445 }
7446 
7447 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
7448 {
7449 	u32 wgp_idx, wgp_active_bitmap;
7450 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
7451 
7452 	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
7453 	cu_active_bitmap = 0;
7454 
7455 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
7456 		/* each enabled WGP accounts for 2 enabled CUs */
7457 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
7458 		if (wgp_active_bitmap & (1 << wgp_idx))
7459 			cu_active_bitmap |= cu_bitmap_per_wgp;
7460 	}
7461 
7462 	return cu_active_bitmap;
7463 }
7464 
7465 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
7466 				 struct amdgpu_cu_info *cu_info)
7467 {
7468 	int i, j, k, counter, active_cu_number = 0;
7469 	u32 mask, bitmap;
7470 	unsigned disable_masks[8 * 2];
7471 
7472 	if (!adev || !cu_info)
7473 		return -EINVAL;
7474 
7475 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
7476 
7477 	mutex_lock(&adev->grbm_idx_mutex);
7478 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7479 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7480 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
7481 			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
7482 				continue;
7483 			mask = 1;
7484 			counter = 0;
7485 			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
7486 			if (i < 8 && j < 2)
7487 				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
7488 					adev, disable_masks[i * 2 + j]);
7489 			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);
7490 
7491 			/*
7492 			 * GFX11 could support more than 4 SEs, while the bitmap
7493 			 * in cu_info struct is 4x4 and ioctl interface struct
7494 			 * drm_amdgpu_info_device should keep stable.
7495 			 * So we use last two columns of bitmap to store cu mask for
7496 			 * SEs 4 to 7, the layout of the bitmap is as below:
7497 			 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
7498 			 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
7499 			 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
7500 			 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
7501 			 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
7502 			 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
7503 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
7504 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
7505 			 */
7506 			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
7507 
7508 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7509 				if (bitmap & mask)
7510 					counter++;
7511 
7512 				mask <<= 1;
7513 			}
7514 			active_cu_number += counter;
7515 		}
7516 	}
7517 	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7518 	mutex_unlock(&adev->grbm_idx_mutex);
7519 
7520 	cu_info->number = active_cu_number;
7521 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7522 
7523 	return 0;
7524 }
7525 
7526 const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
7527 {
7528 	.type = AMD_IP_BLOCK_TYPE_GFX,
7529 	.major = 11,
7530 	.minor = 0,
7531 	.rev = 0,
7532 	.funcs = &gfx_v11_0_ip_funcs,
7533 };
7534