xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c (revision 53c271b9a06ca307c2ce6994877d8d084d031962)
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "imu_v11_0.h"
33 #include "soc21.h"
34 #include "nvd.h"
35 
36 #include "gc/gc_11_0_0_offset.h"
37 #include "gc/gc_11_0_0_sh_mask.h"
38 #include "smuio/smuio_13_0_6_offset.h"
39 #include "smuio/smuio_13_0_6_sh_mask.h"
40 #include "navi10_enum.h"
41 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
42 
43 #include "soc15.h"
44 #include "clearstate_gfx11.h"
45 #include "v11_structs.h"
46 #include "gfx_v11_0.h"
47 #include "gfx_v11_0_cleaner_shader.h"
48 #include "gfx_v11_0_3.h"
49 #include "nbio_v4_3.h"
50 #include "mes_v11_0.h"
51 #include "mes_userqueue.h"
52 #include "amdgpu_userq_fence.h"
53 
54 #define GFX11_NUM_GFX_RINGS		1
55 #define GFX11_MEC_HPD_SIZE	2048
56 
57 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
58 #define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388
59 
60 #define regCGTT_WD_CLK_CTRL		0x5086
61 #define regCGTT_WD_CLK_CTRL_BASE_IDX	1
62 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
63 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
64 #define regPC_CONFIG_CNTL_1		0x194d
65 #define regPC_CONFIG_CNTL_1_BASE_IDX	1
66 
67 #define regCP_GFX_MQD_CONTROL_DEFAULT                                             0x00000100
68 #define regCP_GFX_HQD_VMID_DEFAULT                                                0x00000000
69 #define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT                                      0x00000000
70 #define regCP_GFX_HQD_QUANTUM_DEFAULT                                             0x00000a01
71 #define regCP_GFX_HQD_CNTL_DEFAULT                                                0x00a00000
72 #define regCP_RB_DOORBELL_CONTROL_DEFAULT                                         0x00000000
73 #define regCP_GFX_HQD_RPTR_DEFAULT                                                0x00000000
74 
75 #define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000006
76 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
77 #define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
78 #define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
80 #define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
81 #define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0be05501
82 #define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00300000
83 
84 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
85 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
86 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
87 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
88 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
89 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
90 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
91 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
92 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
93 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
94 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
95 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
96 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
97 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
98 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
99 MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
100 MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
101 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
102 MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
103 MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
104 MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
105 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
106 MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
107 MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
108 MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
109 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
110 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
111 MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
112 MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
113 MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
114 MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
115 MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
116 MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
117 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
118 MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
119 MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
120 MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
121 MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
122 MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");
123 
124 static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
125 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
126 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
127 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
128 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
139 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
140 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
143 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
144 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
145 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
146 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
147 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE),
148 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR),
149 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR),
150 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
151 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
152 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
153 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
154 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
155 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
156 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
157 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
158 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
159 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
160 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
161 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
162 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
163 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
164 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
165 	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
166 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
167 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
168 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
169 	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
170 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
171 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
172 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS),
173 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
174 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
175 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
176 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
177 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
178 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
179 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
180 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
181 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
182 	/* cp header registers */
183 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
184 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
185 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
186 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
187 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
188 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
189 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
190 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
191 	/* SE status registers */
192 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
193 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
194 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
195 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
196 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
197 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
198 };
199 
200 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
201 	/* compute registers */
202 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
203 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
204 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
205 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
206 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
207 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
208 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
209 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
210 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
211 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
212 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
213 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
214 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
215 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
216 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
217 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
218 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
219 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
220 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
221 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
222 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
223 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
224 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
225 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
226 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
227 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
228 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
229 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
230 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
231 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
232 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
233 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
234 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
235 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
236 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
237 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
238 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
239 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
240 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
241 	/* cp header registers */
242 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
243 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
244 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
245 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
246 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
247 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
248 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
249 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
250 };
251 
252 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
253 	/* gfx queue registers */
254 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
255 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
256 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
257 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
258 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
259 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
260 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
261 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
262 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
263 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
264 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
265 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
266 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
267 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
268 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
269 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
270 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
271 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
272 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
273 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
274 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
275 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
276 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
277 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
278 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
279 	/* cp header registers */
280 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
281 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
282 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
283 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
284 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
285 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
286 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
287 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
288 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
289 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
290 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
291 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
292 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
293 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
294 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
295 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
296 };
297 
298 static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
299 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
300 };
301 
302 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = {
304 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
305 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
306 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
307 	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
308 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
309 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
310 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
311 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
312 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
313 };
314 
315 #define DEFAULT_SH_MEM_CONFIG \
316 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
317 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
318 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
319 
320 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
321 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
322 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
323 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
324 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
325 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
326 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
327 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
328                                  struct amdgpu_cu_info *cu_info);
329 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
330 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
331 				   u32 sh_num, u32 instance, int xcc_id);
332 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
333 
334 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
335 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
336 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
337 				     uint32_t val);
338 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
339 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
340 					   uint16_t pasid, uint32_t flush_type,
341 					   bool all_hub, uint8_t dst_sel);
342 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
343 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
344 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
345 				      bool enable);
346 
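/*
 * KIQ PM4 helpers: build the packets the Kernel Interface Queue uses to
 * manage the other queues on the chip.  SET_RESOURCES hands the KIQ its
 * queue mask (and, on gfx11, the cleaner shader address), MAP_QUEUES and
 * UNMAP_QUEUES attach/detach individual rings, and QUERY_STATUS lets the
 * driver fence on a queue.  Dword layouts follow the PACKET3 definitions
 * in nvd.h.
 */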
347 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
348 {
349 	struct amdgpu_device *adev = kiq_ring->adev;
350 	u64 shader_mc_addr;
351 
352 	/* Cleaner shader MC address */
353 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
354 
355 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
356 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
357 			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
358 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
359 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
360 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
361 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
362 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
363 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
364 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
365 }
366 
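/*
 * Build a MAP_QUEUES packet for @ring.  The ME/engine selection is
 * derived from the ring type: compute queues live on ME1, gfx queues use
 * engine select 4 on ME0, and MES queues use engine select 5 on ME2.
 */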
367 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
368 				 struct amdgpu_ring *ring)
369 {
370 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
371 	uint64_t wptr_addr = ring->wptr_gpu_addr;
372 	uint32_t me = 0, eng_sel = 0;
373 
374 	switch (ring->funcs->type) {
375 	case AMDGPU_RING_TYPE_COMPUTE:
376 		me = 1;
377 		eng_sel = 0;
378 		break;
379 	case AMDGPU_RING_TYPE_GFX:
380 		me = 0;
381 		eng_sel = 4;
382 		break;
383 	case AMDGPU_RING_TYPE_MES:
384 		me = 2;
385 		eng_sel = 5;
386 		break;
387 	default:
388 		WARN_ON(1);
389 	}
390 
391 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
392 	/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
393 	amdgpu_ring_write(kiq_ring,
394 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
395 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
396 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
397 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
398 			  PACKET3_MAP_QUEUES_ME((me)) |
399 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
400 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
401 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
402 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
403 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
404 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
405 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
406 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
407 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
408 }
409 
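/*
 * Build an UNMAP_QUEUES packet for @ring, or route the request through
 * MES when MES is enabled and the KIQ ring scheduler is not ready.  For
 * PREEMPT_QUEUES_NO_UNMAP the trailing dwords pass @gpu_addr and @seq to
 * the firmware; otherwise they are zero.
 */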
410 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
411 				   struct amdgpu_ring *ring,
412 				   enum amdgpu_unmap_queues_action action,
413 				   u64 gpu_addr, u64 seq)
414 {
415 	struct amdgpu_device *adev = kiq_ring->adev;
416 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
417 
418 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
419 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
420 		return;
421 	}
422 
423 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
424 	amdgpu_ring_write(kiq_ring, /* action, queue_sel:0, eng_sel, num_Q:1 */
425 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
426 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
427 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
428 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
429 	amdgpu_ring_write(kiq_ring,
430 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
431 
432 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
433 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
434 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
435 		amdgpu_ring_write(kiq_ring, seq);
436 	} else {
437 		amdgpu_ring_write(kiq_ring, 0);
438 		amdgpu_ring_write(kiq_ring, 0);
439 		amdgpu_ring_write(kiq_ring, 0);
440 	}
441 }
442 
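/*
 * Build a QUERY_STATUS packet for the queue identified by its doorbell
 * offset; @addr and @seq give the firmware a fence location to signal
 * when the query completes.
 */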
443 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
444 				   struct amdgpu_ring *ring,
445 				   u64 addr,
446 				   u64 seq)
447 {
448 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
449 
450 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
451 	amdgpu_ring_write(kiq_ring,
452 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
453 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
454 			  PACKET3_QUERY_STATUS_COMMAND(2));
455 	amdgpu_ring_write(kiq_ring, /* doorbell offset, eng_sel */
456 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
457 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
458 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
459 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
460 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
461 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
462 }
463 
464 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
465 				uint16_t pasid, uint32_t flush_type,
466 				bool all_hub)
467 {
468 	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
469 }
470 
471 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
472 	.kiq_set_resources = gfx11_kiq_set_resources,
473 	.kiq_map_queues = gfx11_kiq_map_queues,
474 	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
475 	.kiq_query_status = gfx11_kiq_query_status,
476 	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
477 	.set_resources_size = 8,
478 	.map_queues_size = 7,
479 	.unmap_queues_size = 6,
480 	.query_status_size = 7,
481 	.invalidate_tlbs_size = 2,
482 };
483 
484 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
485 {
486 	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
487 }
488 
489 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
490 {
491 	if (amdgpu_sriov_vf(adev))
492 		return;
493 
494 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
495 	case IP_VERSION(11, 0, 1):
496 	case IP_VERSION(11, 0, 4):
497 		soc15_program_register_sequence(adev,
498 						golden_settings_gc_11_0_1,
499 						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
500 		break;
501 	default:
502 		break;
503 	}
504 	soc15_program_register_sequence(adev,
505 					golden_settings_gc_11_0,
506 					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
508 }
509 
510 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
511 				       bool wc, uint32_t reg, uint32_t val)
512 {
513 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
514 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
515 			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
516 	amdgpu_ring_write(ring, reg);
517 	amdgpu_ring_write(ring, 0);
518 	amdgpu_ring_write(ring, val);
519 }
520 
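/*
 * Emit a WAIT_REG_MEM packet that polls a register (@mem_space == 0) or
 * a memory dword (@mem_space == 1) until (value & @mask) == @ref,
 * re-checking every @inv cycles on the selected engine.
 */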
521 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
522 				  int mem_space, int opt, uint32_t addr0,
523 				  uint32_t addr1, uint32_t ref, uint32_t mask,
524 				  uint32_t inv)
525 {
526 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
527 	amdgpu_ring_write(ring,
528 			  /* memory (1) or register (0) */
529 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
530 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
531 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
532 			   WAIT_REG_MEM_ENGINE(eng_sel)));
533 
534 	if (mem_space)
535 		BUG_ON(addr0 & 0x3); /* Dword align */
536 	amdgpu_ring_write(ring, addr0);
537 	amdgpu_ring_write(ring, addr1);
538 	amdgpu_ring_write(ring, ref);
539 	amdgpu_ring_write(ring, mask);
540 	amdgpu_ring_write(ring, inv); /* poll interval */
541 }
542 
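/*
 * Pad the ring with @num_nop NOP dwords.  A single dword is just the NOP
 * header; longer runs are folded into one PACKET3_NOP whose count field
 * covers the body (the header encodes at most 0x3ffe payload dwords, so
 * anything beyond that falls back to individual NOPs).
 */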
543 static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
544 {
545 	/* Header itself is a NOP packet */
546 	if (num_nop == 1) {
547 		amdgpu_ring_write(ring, ring->funcs->nop);
548 		return;
549 	}
550 
551 	/* Max HW optimization is 0x3ffe dwords per packet; any remainder goes one NOP at a time */
552 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
553 
554 	/* Header is at index 0, followed by num_nop - 1 NOP dwords */
555 	amdgpu_ring_insert_nop(ring, num_nop - 1);
556 }
557 
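/*
 * Basic ring liveness test: seed SCRATCH_REG0 with 0xCAFEDEAD, ask the
 * ring to rewrite it to 0xDEADBEEF, and poll the register until the
 * value lands or the usec timeout expires.  KIQ rings take the
 * emit_wreg path instead of SET_UCONFIG_REG.
 */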
558 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
559 {
560 	struct amdgpu_device *adev = ring->adev;
561 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
562 	uint32_t tmp = 0;
563 	unsigned i;
564 	int r;
565 
566 	WREG32(scratch, 0xCAFEDEAD);
567 	r = amdgpu_ring_alloc(ring, 5);
568 	if (r) {
569 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
570 			  ring->idx, r);
571 		return r;
572 	}
573 
574 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
575 		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
576 	} else {
577 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
578 		amdgpu_ring_write(ring, scratch -
579 				  PACKET3_SET_UCONFIG_REG_START);
580 		amdgpu_ring_write(ring, 0xDEADBEEF);
581 	}
582 	amdgpu_ring_commit(ring);
583 
584 	for (i = 0; i < adev->usec_timeout; i++) {
585 		tmp = RREG32(scratch);
586 		if (tmp == 0xDEADBEEF)
587 			break;
588 		if (amdgpu_emu_mode == 1)
589 			msleep(1);
590 		else
591 			udelay(1);
592 	}
593 
594 	if (i >= adev->usec_timeout)
595 		r = -ETIMEDOUT;
596 	return r;
597 }
598 
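/*
 * Indirect buffer test: submit a small IB that WRITE_DATAs 0xDEADBEEF
 * into a writeback slot, wait on the resulting fence, and verify the
 * value.  Skipped on the KIQ when MES is in use, since the MES KIQ
 * firmware has no indirect buffer support yet.
 */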
599 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
600 {
601 	struct amdgpu_device *adev = ring->adev;
602 	struct amdgpu_ib ib;
603 	struct dma_fence *f = NULL;
604 	unsigned index;
605 	uint64_t gpu_addr;
606 	volatile uint32_t *cpu_ptr;
607 	long r;
608 
609 	/* MES KIQ fw doesn't have indirect buffer support yet */
610 	if (adev->enable_mes_kiq &&
611 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
612 		return 0;
613 
614 	memset(&ib, 0, sizeof(ib));
615 
616 	r = amdgpu_device_wb_get(adev, &index);
617 	if (r)
618 		return r;
619 
620 	gpu_addr = adev->wb.gpu_addr + (index * 4);
621 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
622 	cpu_ptr = &adev->wb.wb[index];
623 
624 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
625 	if (r) {
626 		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
627 		goto err1;
628 	}
629 
630 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
631 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
632 	ib.ptr[2] = lower_32_bits(gpu_addr);
633 	ib.ptr[3] = upper_32_bits(gpu_addr);
634 	ib.ptr[4] = 0xDEADBEEF;
635 	ib.length_dw = 5;
636 
637 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
638 	if (r)
639 		goto err2;
640 
641 	r = dma_fence_wait_timeout(f, false, timeout);
642 	if (r == 0) {
643 		r = -ETIMEDOUT;
644 		goto err2;
645 	} else if (r < 0) {
646 		goto err2;
647 	}
648 
649 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
650 		r = 0;
651 	else
652 		r = -EINVAL;
653 err2:
654 	amdgpu_ib_free(&ib, NULL);
655 	dma_fence_put(f);
656 err1:
657 	amdgpu_device_wb_free(adev, index);
658 	return r;
659 }
660 
661 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
662 {
663 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
664 	amdgpu_ucode_release(&adev->gfx.me_fw);
665 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
666 	amdgpu_ucode_release(&adev->gfx.mec_fw);
667 
668 	kfree(adev->gfx.rlc.register_list_format);
669 }
670 
671 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
672 {
673 	const struct psp_firmware_header_v1_0 *toc_hdr;
674 	int err = 0;
675 
676 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
677 				   AMDGPU_UCODE_REQUIRED,
678 				   "amdgpu/%s_toc.bin", ucode_prefix);
679 	if (err)
680 		goto out;
681 
682 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
683 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
684 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
685 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
686 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
687 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
688 	return 0;
689 out:
690 	amdgpu_ucode_release(&adev->psp.toc_fw);
691 	return err;
692 }
693 
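/*
 * On gfx 11.0.0/11.0.2/11.0.3, CP gfx shadow support (firmware-assisted
 * save/restore of gfx queue state) requires minimum ME/PFP/MEC firmware
 * versions and is currently only exposed under SR-IOV.
 */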
694 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
695 {
696 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
697 	case IP_VERSION(11, 0, 0):
698 	case IP_VERSION(11, 0, 2):
699 	case IP_VERSION(11, 0, 3):
700 		if ((adev->gfx.me_fw_version >= 1505) &&
701 		    (adev->gfx.pfp_fw_version >= 1600) &&
702 		    (adev->gfx.mec_fw_version >= 512)) {
703 			if (amdgpu_sriov_vf(adev))
704 				adev->gfx.cp_gfx_shadow = true;
705 			else
706 				adev->gfx.cp_gfx_shadow = false;
707 		}
708 		break;
709 	default:
710 		adev->gfx.cp_gfx_shadow = false;
711 		break;
712 	}
713 }
714 
715 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
716 {
717 	char ucode_prefix[25];
718 	int err;
719 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
720 	uint16_t version_major;
721 	uint16_t version_minor;
722 
723 	DRM_DEBUG("\n");
724 
725 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
726 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
727 				   AMDGPU_UCODE_REQUIRED,
728 				   "amdgpu/%s_pfp.bin", ucode_prefix);
729 	if (err)
730 		goto out;
731 	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11 */
732 	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
733 				(union amdgpu_firmware_header *)
734 				adev->gfx.pfp_fw->data, 2, 0);
735 	if (adev->gfx.rs64_enable) {
736 		dev_info(adev->dev, "CP RS64 enable\n");
737 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
738 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
739 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
740 	} else {
741 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
742 	}
743 
744 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
745 				   AMDGPU_UCODE_REQUIRED,
746 				   "amdgpu/%s_me.bin", ucode_prefix);
747 	if (err)
748 		goto out;
749 	if (adev->gfx.rs64_enable) {
750 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
751 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
752 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
753 	} else {
754 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
755 	}
756 
757 	if (!amdgpu_sriov_vf(adev)) {
758 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
759 		    adev->pdev->revision == 0xCE)
760 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
761 						   AMDGPU_UCODE_REQUIRED,
762 						   "amdgpu/gc_11_0_0_rlc_1.bin");
763 		else if (amdgpu_is_kicker_fw(adev))
764 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
765 						   AMDGPU_UCODE_REQUIRED,
766 						   "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
767 		else
768 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
769 						   AMDGPU_UCODE_REQUIRED,
770 						   "amdgpu/%s_rlc.bin", ucode_prefix);
771 		if (err)
772 			goto out;
773 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
774 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
775 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
776 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
777 		if (err)
778 			goto out;
779 	}
780 
781 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
782 				   AMDGPU_UCODE_REQUIRED,
783 				   "amdgpu/%s_mec.bin", ucode_prefix);
784 	if (err)
785 		goto out;
786 	if (adev->gfx.rs64_enable) {
787 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
788 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
789 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
790 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
791 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
792 	} else {
793 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
794 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
795 	}
796 
797 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
798 		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);
799 
800 	/* only one MEC for gfx 11.0.0. */
801 	adev->gfx.mec2_fw = NULL;
802 
803 	gfx_v11_0_check_fw_cp_gfx_shadow(adev);
804 
805 	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
806 		err = adev->gfx.imu.funcs->init_microcode(adev);
807 		if (err)
808 			DRM_ERROR("Failed to init imu firmware!\n");
809 		return err;
810 	}
811 
812 out:
813 	if (err) {
814 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
815 		amdgpu_ucode_release(&adev->gfx.me_fw);
816 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
817 		amdgpu_ucode_release(&adev->gfx.mec_fw);
818 	}
819 
820 	return err;
821 }
822 
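/*
 * Size the clear-state buffer in dwords: the begin/end preamble packets,
 * the context control packet, one SET_CONTEXT_REG packet per extent in
 * the gfx11 clear-state table, the PA_SC_TILE_STEERING_OVERRIDE write,
 * and the trailing clear-state packet.
 */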
823 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
824 {
825 	u32 count = 0;
826 	const struct cs_section_def *sect = NULL;
827 	const struct cs_extent_def *ext = NULL;
828 
829 	/* begin clear state */
830 	count += 2;
831 	/* context control state */
832 	count += 3;
833 
834 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
835 		for (ext = sect->section; ext->extent != NULL; ++ext) {
836 			if (sect->id == SECT_CONTEXT)
837 				count += 2 + ext->reg_count;
838 			else
839 				return 0;
840 		}
841 	}
842 
843 	/* set PA_SC_TILE_STEERING_OVERRIDE */
844 	count += 3;
845 	/* end clear state */
846 	count += 2;
847 	/* clear state */
848 	count += 2;
849 
850 	return count;
851 }
852 
853 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
854 				    volatile u32 *buffer)
855 {
856 	u32 count = 0;
857 	int ctx_reg_offset;
858 
859 	if (adev->gfx.rlc.cs_data == NULL)
860 		return;
861 	if (buffer == NULL)
862 		return;
863 
864 	count = amdgpu_gfx_csb_preamble_start(buffer);
865 	count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
866 
867 	ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
868 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
869 	buffer[count++] = cpu_to_le32(ctx_reg_offset);
870 	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
871 
872 	amdgpu_gfx_csb_preamble_end(buffer, count);
873 }
874 
875 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
876 {
877 	/* clear state block */
878 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
879 			&adev->gfx.rlc.clear_state_gpu_addr,
880 			(void **)&adev->gfx.rlc.cs_ptr);
881 
882 	/* jump table block */
883 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
884 			&adev->gfx.rlc.cp_table_gpu_addr,
885 			(void **)&adev->gfx.rlc.cp_table_ptr);
886 }
887 
888 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
889 {
890 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
891 
892 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
893 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
894 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
895 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
896 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
897 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
898 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
899 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
900 	adev->gfx.rlc.rlcg_reg_access_supported = true;
901 }
902 
903 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
904 {
905 	const struct cs_section_def *cs_data;
906 	int r;
907 
908 	adev->gfx.rlc.cs_data = gfx11_cs_data;
909 
910 	cs_data = adev->gfx.rlc.cs_data;
911 
912 	if (cs_data) {
913 		/* init clear state block */
914 		r = amdgpu_gfx_rlc_init_csb(adev);
915 		if (r)
916 			return r;
917 	}
918 
919 	/* init spm vmid with 0xf */
920 	if (adev->gfx.rlc.funcs->update_spm_vmid)
921 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
922 
923 	return 0;
924 }
925 
926 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
927 {
928 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
929 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
930 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
931 }
932 
933 static void gfx_v11_0_me_init(struct amdgpu_device *adev)
934 {
935 	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
936 
937 	amdgpu_gfx_graphics_queue_acquire(adev);
938 }
939 
940 static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
941 {
942 	int r;
943 	u32 *hpd;
944 	size_t mec_hpd_size;
945 
946 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
947 
948 	/* take ownership of the relevant compute queues */
949 	amdgpu_gfx_compute_queue_acquire(adev);
950 	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;
951 
952 	if (mec_hpd_size) {
953 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
954 					      AMDGPU_GEM_DOMAIN_GTT,
955 					      &adev->gfx.mec.hpd_eop_obj,
956 					      &adev->gfx.mec.hpd_eop_gpu_addr,
957 					      (void **)&hpd);
958 		if (r) {
959 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
960 			gfx_v11_0_mec_fini(adev);
961 			return r;
962 		}
963 
964 		memset(hpd, 0, mec_hpd_size);
965 
966 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
967 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
968 	}
969 
970 	return 0;
971 }
972 
973 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
974 {
975 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
976 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
977 		(address << SQ_IND_INDEX__INDEX__SHIFT));
978 	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
979 }
980 
981 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
982 			   uint32_t thread, uint32_t regno,
983 			   uint32_t num, uint32_t *out)
984 {
985 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
986 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
987 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
988 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
989 		(SQ_IND_INDEX__AUTO_INCR_MASK));
990 	while (num--)
991 		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
992 }
993 
994 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
995 {
996 	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
997 	 * field when performing a select_se_sh so it should be
998 	 * zero here */
999 	WARN_ON(simd != 0);
1000 
1001 	/* type 3 wave data */
1002 	dst[(*no_fields)++] = 3;
1003 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
1004 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
1005 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
1006 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
1007 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
1008 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
1009 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
1010 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
1011 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
1012 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
1013 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
1014 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
1015 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
1016 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
1017 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
1018 }
1019 
1020 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1021 				     uint32_t wave, uint32_t start,
1022 				     uint32_t size, uint32_t *dst)
1023 {
1024 	WARN_ON(simd != 0);
1025 
1026 	wave_read_regs(
1027 		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
1028 		dst);
1029 }
1030 
1031 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1032 				      uint32_t wave, uint32_t thread,
1033 				      uint32_t start, uint32_t size,
1034 				      uint32_t *dst)
1035 {
1036 	wave_read_regs(
1037 		adev, wave, thread,
1038 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1039 }
1040 
1041 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
1042 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
1043 {
1044 	soc21_grbm_select(adev, me, pipe, q, vm);
1045 }
1046 
1047 /* all sizes are in bytes */
1048 #define MQD_SHADOW_BASE_SIZE      73728
1049 #define MQD_SHADOW_BASE_ALIGNMENT 256
1050 #define MQD_FWWORKAREA_SIZE       484
1051 #define MQD_FWWORKAREA_ALIGNMENT  256
1052 
1053 static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
1054 					 struct amdgpu_gfx_shadow_info *shadow_info)
1055 {
1056 	shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
1057 	shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
1058 	shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
1059 	shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
1060 }
1061 
1062 static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
1063 					 struct amdgpu_gfx_shadow_info *shadow_info,
1064 					 bool skip_check)
1065 {
1066 	if (adev->gfx.cp_gfx_shadow || skip_check) {
1067 		gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
1068 		return 0;
1069 	} else {
1070 		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
1071 		return -ENOTSUPP;
1072 	}
1073 }
1074 
1075 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
1076 	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
1077 	.select_se_sh = &gfx_v11_0_select_se_sh,
1078 	.read_wave_data = &gfx_v11_0_read_wave_data,
1079 	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
1080 	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
1081 	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
1082 	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
1083 	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
1084 };
1085 
1086 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
1087 {
1088 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1089 	case IP_VERSION(11, 0, 0):
1090 	case IP_VERSION(11, 0, 2):
1091 		adev->gfx.config.max_hw_contexts = 8;
1092 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1093 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1094 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
1095 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1096 		break;
1097 	case IP_VERSION(11, 0, 3):
1098 		adev->gfx.ras = &gfx_v11_0_3_ras;
1099 		adev->gfx.config.max_hw_contexts = 8;
1100 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1101 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1102 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
1103 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1104 		break;
1105 	case IP_VERSION(11, 0, 1):
1106 	case IP_VERSION(11, 0, 4):
1107 	case IP_VERSION(11, 5, 0):
1108 	case IP_VERSION(11, 5, 1):
1109 	case IP_VERSION(11, 5, 2):
1110 	case IP_VERSION(11, 5, 3):
1111 		adev->gfx.config.max_hw_contexts = 8;
1112 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1113 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1114 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
1115 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
1116 		break;
1117 	default:
1118 		BUG();
1119 		break;
1120 	}
1121 
1122 	return 0;
1123 }
1124 
1125 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
1126 				   int me, int pipe, int queue)
1127 {
1128 	struct amdgpu_ring *ring;
1129 	unsigned int irq_type;
1130 	unsigned int hw_prio;
1131 
1132 	ring = &adev->gfx.gfx_ring[ring_id];
1133 
1134 	ring->me = me;
1135 	ring->pipe = pipe;
1136 	ring->queue = queue;
1137 
1138 	ring->ring_obj = NULL;
1139 	ring->use_doorbell = true;
1140 	if (adev->gfx.disable_kq) {
1141 		ring->no_scheduler = true;
1142 		ring->no_user_submission = true;
1143 	}
1144 
1145 	if (!ring_id)
1146 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1147 	else
1148 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
1149 	ring->vm_hub = AMDGPU_GFXHUB(0);
1150 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1151 
1152 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
1153 	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
1154 		AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1155 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1156 				hw_prio, NULL);
1157 }
1158 
1159 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1160 				       int mec, int pipe, int queue)
1161 {
1162 	int r;
1163 	unsigned irq_type;
1164 	struct amdgpu_ring *ring;
1165 	unsigned int hw_prio;
1166 
1167 	ring = &adev->gfx.compute_ring[ring_id];
1168 
1169 	/* mec0 is me1 */
1170 	ring->me = mec + 1;
1171 	ring->pipe = pipe;
1172 	ring->queue = queue;
1173 
1174 	ring->ring_obj = NULL;
1175 	ring->use_doorbell = true;
1176 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
1177 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1178 				+ (ring_id * GFX11_MEC_HPD_SIZE);
1179 	ring->vm_hub = AMDGPU_GFXHUB(0);
1180 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1181 
1182 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1183 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1184 		+ ring->pipe;
1185 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1186 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1187 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1188 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1189 			     hw_prio, NULL);
1190 	if (r)
1191 		return r;
1192 
1193 	return 0;
1194 }
1195 
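/*
 * Cached copy of the RLC table of contents, indexed by firmware ID.
 * Offsets and sizes are kept in bytes; the TOC itself expresses them in
 * dwords, hence the "* 4" when parsing.
 */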
1196 static struct {
1197 	SOC21_FIRMWARE_ID	id;
1198 	unsigned int		offset;
1199 	unsigned int		size;
1200 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];
1201 
1202 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
1203 {
1204 	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;
1205 
1206 	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
1207 			(ucode->id < SOC21_FIRMWARE_ID_MAX)) {
1208 		rlc_autoload_info[ucode->id].id = ucode->id;
1209 		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
1210 		rlc_autoload_info[ucode->id].size = ucode->size * 4;
1211 
1212 		ucode++;
1213 	}
1214 }
1215 
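/*
 * Total size of the RLC autoload buffer: the sum of all TOC entry sizes,
 * extended to cover the last entry's offset when the TOC has padded
 * entry offsets for alignment.
 */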
1216 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
1217 {
1218 	uint32_t total_size = 0;
1219 	SOC21_FIRMWARE_ID id;
1220 
1221 	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);
1222 
1223 	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
1224 		total_size += rlc_autoload_info[id].size;
1225 
1226 	/* TOC entry offsets may include alignment padding; make sure the buffer covers the last entry */
1227 	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
1228 		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
1229 			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;
1230 
1231 	return total_size;
1232 }
1233 
1234 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
1235 {
1236 	int r;
1237 	uint32_t total_size;
1238 
1239 	total_size = gfx_v11_0_calc_toc_total_size(adev);
1240 
1241 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
1242 				      AMDGPU_GEM_DOMAIN_VRAM |
1243 				      AMDGPU_GEM_DOMAIN_GTT,
1244 				      &adev->gfx.rlc.rlc_autoload_bo,
1245 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
1246 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1247 
1248 	if (r) {
1249 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1250 		return r;
1251 	}
1252 
1253 	return 0;
1254 }
1255 
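/*
 * Copy one firmware image into its slot in the autoload buffer,
 * zero-padding up to the slot size from the TOC, and flag it in the
 * 64-bit autoload mask (RS64 PFP/ME images are copied but not flagged).
 */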
1256 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1257 					      SOC21_FIRMWARE_ID id,
1258 					      const void *fw_data,
1259 					      uint32_t fw_size,
1260 					      uint32_t *fw_autoload_mask)
1261 {
1262 	uint32_t toc_offset;
1263 	uint32_t toc_fw_size;
1264 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1265 
1266 	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
1267 		return;
1268 
1269 	toc_offset = rlc_autoload_info[id].offset;
1270 	toc_fw_size = rlc_autoload_info[id].size;
1271 
1272 	if (fw_size == 0)
1273 		fw_size = toc_fw_size;
1274 
1275 	if (fw_size > toc_fw_size)
1276 		fw_size = toc_fw_size;
1277 
1278 	memcpy(ptr + toc_offset, fw_data, fw_size);
1279 
1280 	if (fw_size < toc_fw_size)
1281 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1282 
1283 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
1284 		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
1285 }
1286 
1287 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
1288 							uint32_t *fw_autoload_mask)
1289 {
1290 	void *data;
1291 	uint32_t size;
1292 	uint64_t *toc_ptr;
1293 
1294 	*(uint64_t *)fw_autoload_mask |= 0x1;
1295 
1296 	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);
1297 
1298 	data = adev->psp.toc.start_addr;
1299 	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;
1300 
1301 	toc_ptr = (uint64_t *)data + size / 8 - 1;
1302 	*toc_ptr = *(uint64_t *)fw_autoload_mask;
1303 
1304 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
1305 					data, size, fw_autoload_mask);
1306 }
1307 
1308 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
1309 							uint32_t *fw_autoload_mask)
1310 {
1311 	const __le32 *fw_data;
1312 	uint32_t fw_size;
1313 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1314 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1315 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1316 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1317 	uint16_t version_major, version_minor;
1318 
1319 	if (adev->gfx.rs64_enable) {
1320 		/* pfp ucode */
1321 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1322 			adev->gfx.pfp_fw->data;
1323 		/* instruction */
1324 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1325 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1326 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1327 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
1328 						fw_data, fw_size, fw_autoload_mask);
1329 		/* data */
1330 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1331 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1332 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1333 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
1334 						fw_data, fw_size, fw_autoload_mask);
1335 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
1336 						fw_data, fw_size, fw_autoload_mask);
1337 		/* me ucode */
1338 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1339 			adev->gfx.me_fw->data;
1340 		/* instruction */
1341 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1342 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1343 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1344 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
1345 						fw_data, fw_size, fw_autoload_mask);
1346 		/* data */
1347 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1348 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1349 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1350 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
1351 						fw_data, fw_size, fw_autoload_mask);
1352 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
1353 						fw_data, fw_size, fw_autoload_mask);
1354 		/* mec ucode */
1355 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1356 			adev->gfx.mec_fw->data;
1357 		/* instruction */
1358 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1359 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1360 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1361 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
1362 						fw_data, fw_size, fw_autoload_mask);
1363 		/* data */
1364 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1365 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1366 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1367 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
1368 						fw_data, fw_size, fw_autoload_mask);
1369 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
1370 						fw_data, fw_size, fw_autoload_mask);
1371 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
1372 						fw_data, fw_size, fw_autoload_mask);
1373 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
1374 						fw_data, fw_size, fw_autoload_mask);
1375 	} else {
1376 		/* pfp ucode */
1377 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1378 			adev->gfx.pfp_fw->data;
1379 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1380 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1381 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1382 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
1383 						fw_data, fw_size, fw_autoload_mask);
1384 
1385 		/* me ucode */
1386 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1387 			adev->gfx.me_fw->data;
1388 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1389 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1390 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1391 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
1392 						fw_data, fw_size, fw_autoload_mask);
1393 
1394 		/* mec ucode */
1395 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1396 			adev->gfx.mec_fw->data;
1397 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1398 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1399 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1400 			cp_hdr->jt_size * 4;
1401 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
1402 						fw_data, fw_size, fw_autoload_mask);
1403 	}
1404 
1405 	/* rlc ucode */
1406 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1407 		adev->gfx.rlc_fw->data;
1408 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1409 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1410 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1411 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
1412 					fw_data, fw_size, fw_autoload_mask);
1413 
1414 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1415 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1416 	if (version_major == 2) {
1417 		if (version_minor >= 2) {
1418 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1419 
1420 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1421 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1422 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1423 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
1424 					fw_data, fw_size, fw_autoload_mask);
1425 
1426 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1427 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1428 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1429 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
1430 					fw_data, fw_size, fw_autoload_mask);
1431 		}
1432 	}
1433 }
1434 
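/*
 * Stage the SDMA microcode for RLC backdoor autoload.  An SDMA v2.0
 * firmware image carries two ucode segments: the context ucode is
 * staged as thread 0 (SOC21_FIRMWARE_ID_SDMA_UCODE_TH0) and the
 * control ucode as thread 1 (SOC21_FIRMWARE_ID_SDMA_UCODE_TH1), each
 * located by its own offset/size fields in the v2.0 header.
 */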
1435 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
1436 							uint32_t *fw_autoload_mask)
1437 {
1438 	const __le32 *fw_data;
1439 	uint32_t fw_size;
1440 	const struct sdma_firmware_header_v2_0 *sdma_hdr;
1441 
1442 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
1443 		adev->sdma.instance[0].fw->data;
1444 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1445 			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
1446 	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
1447 
1448 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1449 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);
1450 
1451 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1452 			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
1453 	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
1454 
1455 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1456 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
1457 }
1458 
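/*
 * Stage the MES microcode for RLC backdoor autoload.  Each MES pipe
 * has its own firmware image (adev->mes.fw[pipe]); the instruction
 * and data (stack) segments are staged under per-pipe SOC21 firmware
 * IDs.
 */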
1459 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
1460 							uint32_t *fw_autoload_mask)
1461 {
1462 	const __le32 *fw_data;
1463 	unsigned int fw_size;
1464 	const struct mes_firmware_header_v1_0 *mes_hdr;
1465 	int pipe, ucode_id, data_id;
1466 
1467 	for (pipe = 0; pipe < 2; pipe++) {
1468 		if (pipe == 0) {
1469 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
1470 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
1471 		} else {
1472 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
1473 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
1474 		}
1475 
1476 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1477 			adev->mes.fw[pipe]->data;
1478 
1479 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1480 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1481 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1482 
1483 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1484 				ucode_id, fw_data, fw_size, fw_autoload_mask);
1485 
1486 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1487 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1488 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1489 
1490 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1491 				data_id, fw_data, fw_size, fw_autoload_mask);
1492 	}
1493 }
1494 
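/*
 * Drive the RLC backdoor autoload sequence: stage the SDMA, GFX, MES
 * and TOC images into the autoload buffer (sequence 2), point the
 * GFX_IMU_RLC_BOOTLOADER registers at the staged RLC_G image, then
 * load, set up and start the IMU (sequences 3 and 4) and disable GPA
 * mode (sequence 5).  The autoload buffer itself is allocated earlier
 * via gfx_v11_0_rlc_autoload_buffer_init().
 */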
1495 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1496 {
1497 	uint32_t rlc_g_offset, rlc_g_size;
1498 	uint64_t gpu_addr;
1499 	uint32_t autoload_fw_id[2];
1500 
1501 	memset(autoload_fw_id, 0, sizeof(autoload_fw_id));
1502 
1503 	/* RLC autoload sequence 2: copy ucode */
1504 	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
1505 	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
1506 	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
1507 	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);
1508 
1509 	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
1510 	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
1511 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
1512 
1513 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1514 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1515 
1516 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1517 
1518 	/* RLC autoload sequence 3: load IMU fw */
1519 	if (adev->gfx.imu.funcs->load_microcode)
1520 		adev->gfx.imu.funcs->load_microcode(adev);
1521 	/* RLC autoload sequence 4: init IMU fw */
1522 	if (adev->gfx.imu.funcs->setup_imu)
1523 		adev->gfx.imu.funcs->setup_imu(adev);
1524 	if (adev->gfx.imu.funcs->start_imu)
1525 		adev->gfx.imu.funcs->start_imu(adev);
1526 
1527 	/* RLC autoload sequence 5: disable gpa mode */
1528 	gfx_v11_0_disable_gpa_mode(adev);
1529 
1530 	return 0;
1531 }
1532 
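/*
 * Pre-allocate the buffers used to snapshot GC registers when dumping
 * IP state (e.g. for a devcoredump).  Each buffer holds one 32-bit
 * value per register per instance, e.g. for the compute queues:
 *   ARRAY_SIZE(gc_cp_reg_list_11) * num_mec * num_pipe_per_mec *
 *   num_queue_per_pipe
 * entries.  Allocation failure is not fatal; a NULL pointer simply
 * means the corresponding dump is skipped.
 */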
1533 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
1534 {
1535 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
1536 	uint32_t *ptr;
1537 	uint32_t inst;
1538 
1539 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
1540 	if (!ptr) {
1541 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1542 		adev->gfx.ip_dump_core = NULL;
1543 	} else {
1544 		adev->gfx.ip_dump_core = ptr;
1545 	}
1546 
1547 	/* Allocate memory for compute queue registers for all the instances */
1548 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
1549 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1550 		adev->gfx.mec.num_queue_per_pipe;
1551 
1552 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1553 	if (!ptr) {
1554 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1555 		adev->gfx.ip_dump_compute_queues = NULL;
1556 	} else {
1557 		adev->gfx.ip_dump_compute_queues = ptr;
1558 	}
1559 
1560 	/* Allocate memory for gfx queue registers for all the instances */
1561 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
1562 	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
1563 		adev->gfx.me.num_queue_per_pipe;
1564 
1565 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1566 	if (!ptr) {
1567 		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
1568 		adev->gfx.ip_dump_gfx_queues = NULL;
1569 	} else {
1570 		adev->gfx.ip_dump_gfx_queues = ptr;
1571 	}
1572 }
1573 
1574 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
1575 {
1576 	int i, j, k, r, ring_id;
1577 	int xcc_id = 0;
1578 	struct amdgpu_device *adev = ip_block->adev;
1579 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
1580 
1581 	INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
1582 
1583 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1584 	case IP_VERSION(11, 0, 0):
1585 	case IP_VERSION(11, 0, 1):
1586 	case IP_VERSION(11, 0, 2):
1587 	case IP_VERSION(11, 0, 3):
1588 	case IP_VERSION(11, 0, 4):
1589 	case IP_VERSION(11, 5, 0):
1590 	case IP_VERSION(11, 5, 1):
1591 	case IP_VERSION(11, 5, 2):
1592 	case IP_VERSION(11, 5, 3):
1593 		adev->gfx.me.num_me = 1;
1594 		adev->gfx.me.num_pipe_per_me = 1;
1595 		adev->gfx.me.num_queue_per_pipe = 2;
1596 		adev->gfx.mec.num_mec = 1;
1597 		adev->gfx.mec.num_pipe_per_mec = 4;
1598 		adev->gfx.mec.num_queue_per_pipe = 4;
1599 		break;
1600 	default:
1601 		adev->gfx.me.num_me = 1;
1602 		adev->gfx.me.num_pipe_per_me = 1;
1603 		adev->gfx.me.num_queue_per_pipe = 1;
1604 		adev->gfx.mec.num_mec = 1;
1605 		adev->gfx.mec.num_pipe_per_mec = 4;
1606 		adev->gfx.mec.num_queue_per_pipe = 8;
1607 		break;
1608 	}
1609 
1610 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1611 	case IP_VERSION(11, 0, 0):
1612 	case IP_VERSION(11, 0, 2):
1613 	case IP_VERSION(11, 0, 3):
1614 		if (!adev->gfx.disable_uq &&
1615 		    adev->gfx.me_fw_version  >= 2420 &&
1616 		    adev->gfx.pfp_fw_version >= 2580 &&
1617 		    adev->gfx.mec_fw_version >= 2650 &&
1618 		    adev->mes.fw_version[0] >= 120) {
1619 			adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
1620 			adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
1621 		}
1622 		break;
1623 	case IP_VERSION(11, 0, 1):
1624 	case IP_VERSION(11, 0, 4):
1625 	case IP_VERSION(11, 5, 0):
1626 	case IP_VERSION(11, 5, 1):
1627 	case IP_VERSION(11, 5, 2):
1628 	case IP_VERSION(11, 5, 3):
1629 		/* add firmware version checks here */
1630 		if (0 && !adev->gfx.disable_uq) {
1631 			adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
1632 			adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
1633 		}
1634 		break;
1635 	default:
1636 		break;
1637 	}
1638 
1639 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1640 	case IP_VERSION(11, 0, 0):
1641 	case IP_VERSION(11, 0, 2):
1642 	case IP_VERSION(11, 0, 3):
1643 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1644 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1645 		if (adev->gfx.me_fw_version  >= 2280 &&
1646 		    adev->gfx.pfp_fw_version >= 2370 &&
1647 		    adev->gfx.mec_fw_version >= 2450  &&
1648 		    adev->mes.fw_version[0] >= 99) {
1649 			adev->gfx.enable_cleaner_shader = true;
1650 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1651 			if (r) {
1652 				adev->gfx.enable_cleaner_shader = false;
1653 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1654 			}
1655 		}
1656 		break;
1657 	case IP_VERSION(11, 5, 0):
1658 	case IP_VERSION(11, 5, 1):
1659 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1660 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1661 		if (adev->gfx.mec_fw_version >= 26 &&
1662 		    adev->mes.fw_version[0] >= 114) {
1663 			adev->gfx.enable_cleaner_shader = true;
1664 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1665 			if (r) {
1666 				adev->gfx.enable_cleaner_shader = false;
1667 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1668 			}
1669 		}
1670 		break;
1671 	case IP_VERSION(11, 5, 2):
1672 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1673 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1674 		if (adev->gfx.me_fw_version  >= 12 &&
1675 		    adev->gfx.pfp_fw_version >= 15 &&
1676 		    adev->gfx.mec_fw_version >= 15) {
1677 			adev->gfx.enable_cleaner_shader = true;
1678 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1679 			if (r) {
1680 				adev->gfx.enable_cleaner_shader = false;
1681 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1682 			}
1683 		}
1684 		break;
1685 	case IP_VERSION(11, 5, 3):
1686 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1687 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1688 		if (adev->gfx.me_fw_version  >= 7 &&
1689 		    adev->gfx.pfp_fw_version >= 8 &&
1690 		    adev->gfx.mec_fw_version >= 8) {
1691 			adev->gfx.enable_cleaner_shader = true;
1692 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1693 			if (r) {
1694 				adev->gfx.enable_cleaner_shader = false;
1695 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1696 			}
1697 		}
1698 		break;
1699 	default:
1700 		adev->gfx.enable_cleaner_shader = false;
1701 		break;
1702 	}
1703 
1704 	/* Enable the CG flag in one-VF mode to allow RLC safe mode enter/exit */
1705 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
1706 	    amdgpu_sriov_is_pp_one_vf(adev))
1707 		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
1708 
1709 	/* EOP Event */
1710 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1711 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1712 			      &adev->gfx.eop_irq);
1713 	if (r)
1714 		return r;
1715 
1716 	/* Bad opcode Event */
1717 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1718 			      GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
1719 			      &adev->gfx.bad_op_irq);
1720 	if (r)
1721 		return r;
1722 
1723 	/* Privileged reg */
1724 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1725 			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1726 			      &adev->gfx.priv_reg_irq);
1727 	if (r)
1728 		return r;
1729 
1730 	/* Privileged inst */
1731 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1732 			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1733 			      &adev->gfx.priv_inst_irq);
1734 	if (r)
1735 		return r;
1736 
1737 	/* FED error */
1738 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1739 				  GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
1740 				  &adev->gfx.rlc_gc_fed_irq);
1741 	if (r)
1742 		return r;
1743 
1744 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1745 
1746 	gfx_v11_0_me_init(adev);
1747 
1748 	r = gfx_v11_0_rlc_init(adev);
1749 	if (r) {
1750 		DRM_ERROR("Failed to init rlc BOs!\n");
1751 		return r;
1752 	}
1753 
1754 	r = gfx_v11_0_mec_init(adev);
1755 	if (r) {
1756 		DRM_ERROR("Failed to init MEC BOs!\n");
1757 		return r;
1758 	}
1759 
1760 	if (adev->gfx.num_gfx_rings) {
1761 		ring_id = 0;
1762 		/* set up the gfx ring */
1763 		for (i = 0; i < adev->gfx.me.num_me; i++) {
1764 			for (j = 0; j < num_queue_per_pipe; j++) {
1765 				for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1766 					if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1767 						continue;
1768 
1769 					r = gfx_v11_0_gfx_ring_init(adev, ring_id,
1770 								    i, k, j);
1771 					if (r)
1772 						return r;
1773 					ring_id++;
1774 				}
1775 			}
1776 		}
1777 	}
1778 
1779 	if (adev->gfx.num_compute_rings) {
1780 		ring_id = 0;
1781 		/* set up the compute queues - allocate horizontally across pipes */
1782 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1783 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1784 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1785 					if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
1786 									     k, j))
1787 						continue;
1788 
1789 					r = gfx_v11_0_compute_ring_init(adev, ring_id,
1790 									i, k, j);
1791 					if (r)
1792 						return r;
1793 
1794 					ring_id++;
1795 				}
1796 			}
1797 		}
1798 	}
1799 
1800 	adev->gfx.gfx_supported_reset =
1801 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
1802 	adev->gfx.compute_supported_reset =
1803 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1804 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1805 	case IP_VERSION(11, 0, 0):
1806 	case IP_VERSION(11, 0, 2):
1807 	case IP_VERSION(11, 0, 3):
1808 		if ((adev->gfx.me_fw_version >= 2280) &&
1809 		    (adev->gfx.mec_fw_version >= 2410) &&
1810 		    !amdgpu_sriov_vf(adev)) {
1811 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1812 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1813 		}
1814 		break;
1815 	default:
1816 		if (!amdgpu_sriov_vf(adev)) {
1817 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1818 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1819 		}
1820 		break;
1821 	}
1822 
1823 	if (!adev->enable_mes_kiq) {
1824 		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
1825 		if (r) {
1826 			DRM_ERROR("Failed to init KIQ BOs!\n");
1827 			return r;
1828 		}
1829 
1830 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1831 		if (r)
1832 			return r;
1833 	}
1834 
1835 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
1836 	if (r)
1837 		return r;
1838 
1839 	/* allocate visible FB for rlc auto-loading fw */
1840 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1841 		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1842 		if (r)
1843 			return r;
1844 	}
1845 
1846 	r = gfx_v11_0_gpu_early_init(adev);
1847 	if (r)
1848 		return r;
1849 
1850 	if (amdgpu_gfx_ras_sw_init(adev)) {
1851 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
1852 		return -EINVAL;
1853 	}
1854 
1855 	gfx_v11_0_alloc_ip_dump(adev);
1856 
1857 	r = amdgpu_gfx_sysfs_init(adev);
1858 	if (r)
1859 		return r;
1860 
1861 	return 0;
1862 }
1863 
1864 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1865 {
1866 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1867 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1868 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1869 
1870 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1871 			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1872 			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1873 }
1874 
1875 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1876 {
1877 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1878 			      &adev->gfx.me.me_fw_gpu_addr,
1879 			      (void **)&adev->gfx.me.me_fw_ptr);
1880 
1881 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1882 			       &adev->gfx.me.me_fw_data_gpu_addr,
1883 			       (void **)&adev->gfx.me.me_fw_data_ptr);
1884 }
1885 
1886 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1887 {
1888 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1889 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1890 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1891 }
1892 
1893 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
1894 {
1895 	int i;
1896 	struct amdgpu_device *adev = ip_block->adev;
1897 
1898 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1899 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1900 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1901 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1902 
1903 	amdgpu_gfx_mqd_sw_fini(adev, 0);
1904 
1905 	if (!adev->enable_mes_kiq) {
1906 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
1907 		amdgpu_gfx_kiq_fini(adev, 0);
1908 	}
1909 
1910 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1911 
1912 	gfx_v11_0_pfp_fini(adev);
1913 	gfx_v11_0_me_fini(adev);
1914 	gfx_v11_0_rlc_fini(adev);
1915 	gfx_v11_0_mec_fini(adev);
1916 
1917 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1918 		gfx_v11_0_rlc_autoload_buffer_fini(adev);
1919 
1920 	gfx_v11_0_free_microcode(adev);
1921 
1922 	amdgpu_gfx_sysfs_fini(adev);
1923 
1924 	kfree(adev->gfx.ip_dump_core);
1925 	kfree(adev->gfx.ip_dump_compute_queues);
1926 	kfree(adev->gfx.ip_dump_gfx_queues);
1927 
1928 	return 0;
1929 }
1930 
1931 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1932 				   u32 sh_num, u32 instance, int xcc_id)
1933 {
1934 	u32 data;
1935 
1936 	if (instance == 0xffffffff)
1937 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1938 				     INSTANCE_BROADCAST_WRITES, 1);
1939 	else
1940 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1941 				     instance);
1942 
1943 	if (se_num == 0xffffffff)
1944 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1945 				     1);
1946 	else
1947 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1948 
1949 	if (sh_num == 0xffffffff)
1950 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1951 				     1);
1952 	else
1953 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1954 
1955 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1956 }
1957 
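/*
 * Derive the bitmap of usable shader arrays: build a full mask of
 * max_sh_per_se * max_shader_engines bits, then clear every SA that
 * is either fused off (CC_GC_SA_UNIT_DISABLE) or disabled by
 * configuration (GC_USER_SA_UNIT_DISABLE).
 */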
1958 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
1959 {
1960 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1961 
1962 	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
1963 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1964 					   CC_GC_SA_UNIT_DISABLE,
1965 					   SA_DISABLE);
1966 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
1967 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1968 						 GC_USER_SA_UNIT_DISABLE,
1969 						 SA_DISABLE);
1970 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1971 					    adev->gfx.config.max_shader_engines);
1972 
1973 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1974 }
1975 
1976 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1977 {
1978 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1979 	u32 rb_mask;
1980 
1981 	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1982 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1983 					    CC_RB_BACKEND_DISABLE,
1984 					    BACKEND_DISABLE);
1985 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1986 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1987 						 GC_USER_RB_BACKEND_DISABLE,
1988 						 BACKEND_DISABLE);
1989 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1990 					    adev->gfx.config.max_shader_engines);
1991 
1992 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1993 }
1994 
1995 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
1996 {
1997 	u32 rb_bitmap_per_sa;
1998 	u32 rb_bitmap_width_per_sa;
1999 	u32 max_sa;
2000 	u32 active_sa_bitmap;
2001 	u32 global_active_rb_bitmap;
2002 	u32 active_rb_bitmap = 0;
2003 	u32 i;
2004 
2005 	/* query sa bitmap from SA_UNIT_DISABLE registers */
2006 	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
2007 	/* query rb bitmap from RB_BACKEND_DISABLE registers */
2008 	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
2009 
2010 	/* generate active rb bitmap according to active sa bitmap */
2011 	max_sa = adev->gfx.config.max_shader_engines *
2012 		 adev->gfx.config.max_sh_per_se;
2013 	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
2014 				 adev->gfx.config.max_sh_per_se;
2015 	rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
2016 
2017 	for (i = 0; i < max_sa; i++) {
2018 		if (active_sa_bitmap & (1 << i))
2019 			active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
2020 	}
2021 
2022 	active_rb_bitmap &= global_active_rb_bitmap;
2023 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
2024 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
2025 }
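
/*
 * Worked example for the expansion above, assuming a hypothetical
 * part with 2 SEs, 2 SAs per SE and 4 RBs per SE: max_sa = 4,
 * rb_bitmap_width_per_sa = 2 and rb_bitmap_per_sa = 0x3.  An
 * active_sa_bitmap of 0b1011 then expands to
 * 0x3 | (0x3 << 2) | (0x3 << 6) = 0xcf, which is finally ANDed with
 * the harvested-RB mask read from the RB_BACKEND_DISABLE registers.
 */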
2026 
2027 #define DEFAULT_SH_MEM_BASES	(0x6000)
2028 #define LDS_APP_BASE           0x1
2029 #define SCRATCH_APP_BASE       0x2
2030 
2031 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
2032 {
2033 	int i;
2034 	uint32_t sh_mem_bases;
2035 	uint32_t data;
2036 
2037 	/*
2038 	 * Configure apertures:
2039 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2040 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2041 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2042 	 */
2043 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
2044 			SCRATCH_APP_BASE;
2045 
2046 	mutex_lock(&adev->srbm_mutex);
2047 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2048 		soc21_grbm_select(adev, 0, 0, 0, i);
2049 		/* CP and shaders */
2050 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2051 		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
2052 
2053 		/* Enable trap for each kfd vmid. */
2054 		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
2055 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
2056 		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
2057 	}
2058 	soc21_grbm_select(adev, 0, 0, 0, 0);
2059 	mutex_unlock(&adev->srbm_mutex);
2060 
2061 	/*
2062 	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
2063 	 * access. These should be enabled by FW for target VMIDs.
2064 	 */
2065 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2066 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
2067 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
2068 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
2069 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
2070 	}
2071 }
2072 
2073 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
2074 {
2075 	int vmid;
2076 
2077 	/*
2078 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2079 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2080 	 * the driver can enable them for graphics. VMID0 should maintain
2081 	 * access so that HWS firmware can save/restore entries.
2082 	 */
2083 	for (vmid = 1; vmid < 16; vmid++) {
2084 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
2085 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
2086 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
2087 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
2088 	}
2089 }
2090 
2091 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
2092 {
2093 	/* TODO: harvest feature to be added later. */
2094 }
2095 
2096 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
2097 {
2098 	/* TCCs are global (not instanced). */
2099 	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
2100 			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
2101 
2102 	adev->gfx.config.tcc_disabled_mask =
2103 		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
2104 		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
2105 }
2106 
2107 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
2108 {
2109 	u32 tmp;
2110 	int i;
2111 
2112 	if (!amdgpu_sriov_vf(adev))
2113 		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2114 
2115 	gfx_v11_0_setup_rb(adev);
2116 	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
2117 	gfx_v11_0_get_tcc_info(adev);
2118 	adev->gfx.config.pa_sc_tile_steering_override = 0;
2119 
2120 	/* Record whether texture coordinate truncation is conformant. */
2121 	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
2122 	adev->gfx.config.ta_cntl2_truncate_coord_mode =
2123 		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
2124 
2125 	/* XXX SH_MEM regs */
2126 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2127 	mutex_lock(&adev->srbm_mutex);
2128 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2129 		soc21_grbm_select(adev, 0, 0, 0, i);
2130 		/* CP and shaders */
2131 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2132 		if (i != 0) {
2133 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2134 				(adev->gmc.private_aperture_start >> 48));
2135 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2136 				(adev->gmc.shared_aperture_start >> 48));
2137 			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
2138 		}
2139 	}
2140 	soc21_grbm_select(adev, 0, 0, 0, 0);
2141 
2142 	mutex_unlock(&adev->srbm_mutex);
2143 
2144 	gfx_v11_0_init_compute_vmid(adev);
2145 	gfx_v11_0_init_gds_vmid(adev);
2146 }
2147 
2148 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
2149 				      int me, int pipe)
2150 {
2151 	if (me != 0)
2152 		return 0;
2153 
2154 	switch (pipe) {
2155 	case 0:
2156 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
2157 	case 1:
2158 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
2159 	default:
2160 		return 0;
2161 	}
2162 }
2163 
2164 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
2165 				      int me, int pipe)
2166 {
2167 	/*
2168 	 * amdgpu controls only the first MEC. That's why this function only
2169 	 * handles the setting of interrupts for this specific MEC. All other
2170 	 * pipes' interrupts are set by amdkfd.
2171 	 */
2172 	if (me != 1)
2173 		return 0;
2174 
2175 	switch (pipe) {
2176 	case 0:
2177 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
2178 	case 1:
2179 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
2180 	case 2:
2181 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
2182 	case 3:
2183 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
2184 	default:
2185 		return 0;
2186 	}
2187 }
2188 
2189 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2190 					       bool enable)
2191 {
2192 	u32 tmp, cp_int_cntl_reg;
2193 	int i, j;
2194 
2195 	if (amdgpu_sriov_vf(adev))
2196 		return;
2197 
2198 	for (i = 0; i < adev->gfx.me.num_me; i++) {
2199 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
2200 			cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
2201 
2202 			if (cp_int_cntl_reg) {
2203 				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
2204 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
2205 						    enable ? 1 : 0);
2206 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
2207 						    enable ? 1 : 0);
2208 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
2209 						    enable ? 1 : 0);
2210 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
2211 						    enable ? 1 : 0);
2212 				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
2213 			}
2214 		}
2215 	}
2216 }
2217 
2218 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
2219 {
2220 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2221 
2222 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
2223 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2224 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
2225 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2226 	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
2227 
2228 	return 0;
2229 }
2230 
2231 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
2232 {
2233 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
2234 
2235 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2236 	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
2237 }
2238 
2239 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
2240 {
2241 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2242 	udelay(50);
2243 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2244 	udelay(50);
2245 }
2246 
2247 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
2248 					     bool enable)
2249 {
2250 	uint32_t rlc_pg_cntl;
2251 
2252 	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
2253 
2254 	if (!enable) {
2255 		/* RLC_PG_CNTL[23] = 0 (default)
2256 		 * RLC will wait for handshake acks with SMU
2257 		 * GFXOFF will be enabled
2258 		 * RLC_PG_CNTL[23] = 1
2259 		 * RLC will not issue any message to SMU
2260 		 * hence no handshake between SMU & RLC
2261 		 * GFXOFF will be disabled
2262 		 */
2263 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2264 	} else
2265 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2266 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
2267 }
2268 
2269 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
2270 {
2271 	/* TODO: unconditionally enable the RLC & SMU handshake once SMU
2272 	 * and the GFXOFF feature work as expected */
2273 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
2274 		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
2275 
2276 	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2277 	udelay(50);
2278 }
2279 
2280 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
2281 {
2282 	uint32_t tmp;
2283 
2284 	/* enable Save Restore Machine */
2285 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
2286 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2287 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
2288 	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
2289 }
2290 
2291 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
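/*
 * Legacy (direct) load of the RLC_G microcode.  The ucode is streamed
 * through an ADDR/DATA register pair: RLC_GPM_UCODE_ADDR is seeded
 * with the load start address and advances on every
 * RLC_GPM_UCODE_DATA write, which is why only the start address is
 * programmed.  The final UCODE_ADDR write publishes the firmware
 * version that was loaded.
 */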
2292 {
2293 	const struct rlc_firmware_header_v2_0 *hdr;
2294 	const __le32 *fw_data;
2295 	unsigned int i, fw_size;
2296 
2297 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2298 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2299 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2300 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2301 
2302 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
2303 		     RLCG_UCODE_LOADING_START_ADDRESS);
2304 
2305 	for (i = 0; i < fw_size; i++)
2306 		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
2307 			     le32_to_cpup(fw_data++));
2308 
2309 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2310 }
2311 
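/*
 * Direct load of the RLC LX6 IRAM and DRAM images (rlc header v2.2+),
 * using the same ADDR/DATA streaming idiom as above.  The msleep()
 * every 100 dwords applies only to emulation (amdgpu_emu_mode == 1),
 * presumably to pace register writes against the much slower model.
 */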
2312 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2313 {
2314 	const struct rlc_firmware_header_v2_2 *hdr;
2315 	const __le32 *fw_data;
2316 	unsigned int i, fw_size;
2317 	u32 tmp;
2318 
2319 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2320 
2321 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2322 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2323 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2324 
2325 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2326 
2327 	for (i = 0; i < fw_size; i++) {
2328 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2329 			msleep(1);
2330 		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2331 				le32_to_cpup(fw_data++));
2332 	}
2333 
2334 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2335 
2336 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2337 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2338 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2339 
2340 	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2341 	for (i = 0; i < fw_size; i++) {
2342 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2343 			msleep(1);
2344 		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2345 				le32_to_cpup(fw_data++));
2346 	}
2347 
2348 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2349 
2350 	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2351 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2352 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2353 	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2354 }
2355 
2356 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
2357 {
2358 	const struct rlc_firmware_header_v2_3 *hdr;
2359 	const __le32 *fw_data;
2360 	unsigned int i, fw_size;
2361 	u32 tmp;
2362 
2363 	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
2364 
2365 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2366 			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
2367 	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
2368 
2369 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
2370 
2371 	for (i = 0; i < fw_size; i++) {
2372 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2373 			msleep(1);
2374 		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
2375 				le32_to_cpup(fw_data++));
2376 	}
2377 
2378 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
2379 
2380 	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
2381 	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
2382 	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
2383 
2384 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2385 			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
2386 	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
2387 
2388 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
2389 
2390 	for (i = 0; i < fw_size; i++) {
2391 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2392 			msleep(1);
2393 		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
2394 				le32_to_cpup(fw_data++));
2395 	}
2396 
2397 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
2398 
2399 	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
2400 	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
2401 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
2402 }
2403 
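/*
 * Top-level legacy RLC microcode load.  The v2 header minor version
 * gates which optional images exist: minor >= 2 adds the LX6
 * IRAM/DRAM images and minor == 3 additionally carries the RLCP/RLCV
 * ucode; both optional loads are further gated on amdgpu_dpm == 1.
 */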
2404 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
2405 {
2406 	const struct rlc_firmware_header_v2_0 *hdr;
2407 	uint16_t version_major;
2408 	uint16_t version_minor;
2409 
2410 	if (!adev->gfx.rlc_fw)
2411 		return -EINVAL;
2412 
2413 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2414 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2415 
2416 	version_major = le16_to_cpu(hdr->header.header_version_major);
2417 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
2418 
2419 	if (version_major == 2) {
2420 		gfx_v11_0_load_rlcg_microcode(adev);
2421 		if (amdgpu_dpm == 1) {
2422 			if (version_minor >= 2)
2423 				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
2424 			if (version_minor == 3)
2425 				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2426 		}
2427 
2428 		return 0;
2429 	}
2430 
2431 	return -EINVAL;
2432 }
2433 
2434 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2435 {
2436 	int r;
2437 
2438 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2439 		gfx_v11_0_init_csb(adev);
2440 
2441 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2442 			gfx_v11_0_rlc_enable_srm(adev);
2443 	} else {
2444 		if (amdgpu_sriov_vf(adev)) {
2445 			gfx_v11_0_init_csb(adev);
2446 			return 0;
2447 		}
2448 
2449 		adev->gfx.rlc.funcs->stop(adev);
2450 
2451 		/* disable CG */
2452 		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2453 
2454 		/* disable PG */
2455 		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2456 
2457 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2458 			/* legacy rlc firmware loading */
2459 			r = gfx_v11_0_rlc_load_microcode(adev);
2460 			if (r)
2461 				return r;
2462 		}
2463 
2464 		gfx_v11_0_init_csb(adev);
2465 
2466 		adev->gfx.rlc.funcs->start(adev);
2467 	}
2468 	return 0;
2469 }
2470 
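/*
 * The three config_*_cache() helpers below share one pattern for the
 * legacy (F32) CP frontends: trigger an L1 instruction cache
 * invalidate via the *_IC_OP_CNTL register, poll
 * INVALIDATE_CACHE_COMPLETE for up to 50ms, program *_IC_BASE_CNTL,
 * and finally point *_IC_BASE_LO/HI at the staged ucode (the low
 * dword is masked to a 4K boundary with 0xFFFFF000).
 */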
2471 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2472 {
2473 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2474 	uint32_t tmp;
2475 	int i;
2476 
2477 	/* Trigger an invalidation of the L1 instruction caches */
2478 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2479 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2480 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2481 
2482 	/* Wait for invalidation complete */
2483 	for (i = 0; i < usec_timeout; i++) {
2484 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2485 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2486 					INVALIDATE_CACHE_COMPLETE))
2487 			break;
2488 		udelay(1);
2489 	}
2490 
2491 	if (i >= usec_timeout) {
2492 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2493 		return -EINVAL;
2494 	}
2495 
2496 	if (amdgpu_emu_mode == 1)
2497 		amdgpu_device_flush_hdp(adev, NULL);
2498 
2499 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2500 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2501 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2502 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2503 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2504 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2505 
2506 	/* Program me ucode address into instruction cache address register */
2507 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2508 			lower_32_bits(addr) & 0xFFFFF000);
2509 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2510 			upper_32_bits(addr));
2511 
2512 	return 0;
2513 }
2514 
2515 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2516 {
2517 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2518 	uint32_t tmp;
2519 	int i;
2520 
2521 	/* Trigger an invalidation of the L1 instruction caches */
2522 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2523 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2524 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2525 
2526 	/* Wait for invalidation complete */
2527 	for (i = 0; i < usec_timeout; i++) {
2528 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2529 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2530 					INVALIDATE_CACHE_COMPLETE))
2531 			break;
2532 		udelay(1);
2533 	}
2534 
2535 	if (i >= usec_timeout) {
2536 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2537 		return -EINVAL;
2538 	}
2539 
2540 	if (amdgpu_emu_mode == 1)
2541 		amdgpu_device_flush_hdp(adev, NULL);
2542 
2543 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2544 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2545 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2546 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2547 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2548 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2549 
2550 	/* Program pfp ucode address into instruction cache address register */
2551 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2552 			lower_32_bits(addr) & 0xFFFFF000);
2553 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2554 			upper_32_bits(addr));
2555 
2556 	return 0;
2557 }
2558 
2559 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2560 {
2561 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2562 	uint32_t tmp;
2563 	int i;
2564 
2565 	/* Trigger an invalidation of the L1 instruction caches */
2566 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2567 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2568 
2569 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2570 
2571 	/* Wait for invalidation complete */
2572 	for (i = 0; i < usec_timeout; i++) {
2573 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2574 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2575 					INVALIDATE_CACHE_COMPLETE))
2576 			break;
2577 		udelay(1);
2578 	}
2579 
2580 	if (i >= usec_timeout) {
2581 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2582 		return -EINVAL;
2583 	}
2584 
2585 	if (amdgpu_emu_mode == 1)
2586 		amdgpu_device_flush_hdp(adev, NULL);
2587 
2588 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2589 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2590 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2591 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2592 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2593 
2594 	/* Program mec1 ucode address into instruction cache address register */
2595 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2596 			lower_32_bits(addr) & 0xFFFFF000);
2597 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2598 			upper_32_bits(addr));
2599 
2600 	return 0;
2601 }
2602 
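/*
 * RS64 variants of the cache setup: each frontend gets an instruction
 * cache base (addr) plus a data/stack base (addr2), and the boot
 * program counter is programmed per pipe before pulsing that pipe's
 * reset so the new start address takes effect.  The PRGRM_CNTR_START
 * register pair encodes the 64-bit ucode start address shifted right
 * by 2; with addr = ((u64)hi << 32) | lo:
 *   START    = (hi << 30) | (lo >> 2)  ==  lower_32_bits(addr >> 2)
 *   START_HI = hi >> 2                 ==  upper_32_bits(addr >> 2)
 */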
2603 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2604 {
2605 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2606 	uint32_t tmp;
2607 	unsigned int i, pipe_id;
2608 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2609 
2610 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2611 		adev->gfx.pfp_fw->data;
2612 
2613 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2614 		lower_32_bits(addr));
2615 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2616 		upper_32_bits(addr));
2617 
2618 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2619 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2620 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2621 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2622 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2623 
2624 	/*
2625 	 * Programming any of the CP_PFP_IC_BASE registers
2626 	 * forces an invalidation of the PFP L1 I$. Wait for the
2627 	 * invalidation to complete.
2628 	 */
2629 	for (i = 0; i < usec_timeout; i++) {
2630 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2631 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2632 			INVALIDATE_CACHE_COMPLETE))
2633 			break;
2634 		udelay(1);
2635 	}
2636 
2637 	if (i >= usec_timeout) {
2638 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2639 		return -EINVAL;
2640 	}
2641 
2642 	/* Prime the L1 instruction caches */
2643 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2644 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2645 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2646 	/* Wait for the cache to be primed */
2647 	for (i = 0; i < usec_timeout; i++) {
2648 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2649 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2650 			ICACHE_PRIMED))
2651 			break;
2652 		udelay(1);
2653 	}
2654 
2655 	if (i >= usec_timeout) {
2656 		dev_err(adev->dev, "failed to prime instruction cache\n");
2657 		return -EINVAL;
2658 	}
2659 
2660 	mutex_lock(&adev->srbm_mutex);
2661 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2662 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2663 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2664 			(pfp_hdr->ucode_start_addr_hi << 30) |
2665 			(pfp_hdr->ucode_start_addr_lo >> 2));
2666 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2667 			pfp_hdr->ucode_start_addr_hi >> 2);
2668 
2669 		/*
2670 		 * Program CP_ME_CNTL to reset the given pipe so that
2671 		 * CP_PFP_PRGRM_CNTR_START takes effect.
2672 		 */
2673 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2674 		if (pipe_id == 0)
2675 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2676 					PFP_PIPE0_RESET, 1);
2677 		else
2678 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2679 					PFP_PIPE1_RESET, 1);
2680 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2681 
2682 		/* Clear the pfp pipe reset bit. */
2683 		if (pipe_id == 0)
2684 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2685 					PFP_PIPE0_RESET, 0);
2686 		else
2687 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2688 					PFP_PIPE1_RESET, 0);
2689 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2690 
2691 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2692 			lower_32_bits(addr2));
2693 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2694 			upper_32_bits(addr2));
2695 	}
2696 	soc21_grbm_select(adev, 0, 0, 0, 0);
2697 	mutex_unlock(&adev->srbm_mutex);
2698 
2699 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2700 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2701 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2702 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2703 
2704 	/* Invalidate the data caches */
2705 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2706 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2707 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2708 
2709 	for (i = 0; i < usec_timeout; i++) {
2710 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2711 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2712 			INVALIDATE_DCACHE_COMPLETE))
2713 			break;
2714 		udelay(1);
2715 	}
2716 
2717 	if (i >= usec_timeout) {
2718 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2719 		return -EINVAL;
2720 	}
2721 
2722 	return 0;
2723 }
2724 
2725 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2726 {
2727 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2728 	uint32_t tmp;
2729 	unsigned int i, pipe_id;
2730 	const struct gfx_firmware_header_v2_0 *me_hdr;
2731 
2732 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2733 		adev->gfx.me_fw->data;
2734 
2735 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2736 		lower_32_bits(addr));
2737 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2738 		upper_32_bits(addr));
2739 
2740 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2741 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2742 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2743 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2744 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2745 
2746 	/*
2747 	 * Programming any of the CP_ME_IC_BASE registers
2748 	 * forces an invalidation of the ME L1 I$. Wait for the
2749 	 * invalidation to complete.
2750 	 */
2751 	for (i = 0; i < usec_timeout; i++) {
2752 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2753 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2754 			INVALIDATE_CACHE_COMPLETE))
2755 			break;
2756 		udelay(1);
2757 	}
2758 
2759 	if (i >= usec_timeout) {
2760 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2761 		return -EINVAL;
2762 	}
2763 
2764 	/* Prime the instruction caches */
2765 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2766 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2767 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2768 
2769 	/* Wait for the instruction cache to be primed */
2770 	for (i = 0; i < usec_timeout; i++) {
2771 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2772 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2773 			ICACHE_PRIMED))
2774 			break;
2775 		udelay(1);
2776 	}
2777 
2778 	if (i >= usec_timeout) {
2779 		dev_err(adev->dev, "failed to prime instruction cache\n");
2780 		return -EINVAL;
2781 	}
2782 
2783 	mutex_lock(&adev->srbm_mutex);
2784 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2785 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2786 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2787 			(me_hdr->ucode_start_addr_hi << 30) |
2788 			(me_hdr->ucode_start_addr_lo >> 2));
2789 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2790 			me_hdr->ucode_start_addr_hi >> 2);
2791 
2792 		/*
2793 		 * Program CP_ME_CNTL to reset the given pipe so that
2794 		 * CP_ME_PRGRM_CNTR_START takes effect.
2795 		 */
2796 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2797 		if (pipe_id == 0)
2798 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2799 					ME_PIPE0_RESET, 1);
2800 		else
2801 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2802 					ME_PIPE1_RESET, 1);
2803 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2804 
2805 		/* Clear the me pipe reset bit. */
2806 		if (pipe_id == 0)
2807 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2808 					ME_PIPE0_RESET, 0);
2809 		else
2810 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2811 					ME_PIPE1_RESET, 0);
2812 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2813 
2814 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2815 			lower_32_bits(addr2));
2816 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2817 			upper_32_bits(addr2));
2818 	}
2819 	soc21_grbm_select(adev, 0, 0, 0, 0);
2820 	mutex_unlock(&adev->srbm_mutex);
2821 
2822 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2823 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2824 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2825 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2826 
2827 	/* Invalidate the data caches */
2828 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2829 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2830 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2831 
2832 	for (i = 0; i < usec_timeout; i++) {
2833 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2834 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2835 			INVALIDATE_DCACHE_COMPLETE))
2836 			break;
2837 		udelay(1);
2838 	}
2839 
2840 	if (i >= usec_timeout) {
2841 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2842 		return -EINVAL;
2843 	}
2844 
2845 	return 0;
2846 }
2847 
2848 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2849 {
2850 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2851 	uint32_t tmp;
2852 	unsigned int i;
2853 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2854 
2855 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2856 		adev->gfx.mec_fw->data;
2857 
2858 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2859 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2860 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2861 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2862 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2863 
2864 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2865 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2866 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2867 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2868 
2869 	mutex_lock(&adev->srbm_mutex);
2870 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2871 		soc21_grbm_select(adev, 1, i, 0, 0);
2872 
2873 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, lower_32_bits(addr2));
2874 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2875 		     upper_32_bits(addr2));
2876 
2877 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2878 					mec_hdr->ucode_start_addr_lo >> 2 |
2879 					mec_hdr->ucode_start_addr_hi << 30);
2880 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2881 					mec_hdr->ucode_start_addr_hi >> 2);
2882 
2883 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, lower_32_bits(addr));
2884 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2885 		     upper_32_bits(addr));
2886 	}
2887 	soc21_grbm_select(adev, 0, 0, 0, 0);
2888 	mutex_unlock(&adev->srbm_mutex);
2889 
2890 	/* Trigger an invalidation of the MEC data caches */
2891 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2892 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2893 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2894 
2895 	/* Wait for invalidation complete */
2896 	for (i = 0; i < usec_timeout; i++) {
2897 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2898 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2899 				       INVALIDATE_DCACHE_COMPLETE))
2900 			break;
2901 		udelay(1);
2902 	}
2903 
2904 	if (i >= usec_timeout) {
2905 		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
2906 		return -EINVAL;
2907 	}
2908 
2909 	/* Trigger an invalidation of the L1 instruction caches */
2910 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2911 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2912 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2913 
2914 	/* Wait for invalidation complete */
2915 	for (i = 0; i < usec_timeout; i++) {
2916 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2917 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2918 				       INVALIDATE_CACHE_COMPLETE))
2919 			break;
2920 		udelay(1);
2921 	}
2922 
2923 	if (i >= usec_timeout) {
2924 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2925 		return -EINVAL;
2926 	}
2927 
2928 	return 0;
2929 }
2930 
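/*
 * Program only the RS64 boot program counters and pulse every
 * PFP/ME/MEC pipe reset to latch them; this path assumes the ucode
 * itself has already been loaded by some other means (e.g. the PSP
 * front-door path).
 */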
2931 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2932 {
2933 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2934 	const struct gfx_firmware_header_v2_0 *me_hdr;
2935 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2936 	uint32_t pipe_id, tmp;
2937 
2938 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2939 		adev->gfx.mec_fw->data;
2940 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2941 		adev->gfx.me_fw->data;
2942 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2943 		adev->gfx.pfp_fw->data;
2944 
2945 	/* config pfp program start addr */
2946 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2947 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2948 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2949 			(pfp_hdr->ucode_start_addr_hi << 30) |
2950 			(pfp_hdr->ucode_start_addr_lo >> 2));
2951 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2952 			pfp_hdr->ucode_start_addr_hi >> 2);
2953 	}
2954 	soc21_grbm_select(adev, 0, 0, 0, 0);
2955 
2956 	/* reset pfp pipe */
2957 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2958 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2959 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2960 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2961 
2962 	/* clear pfp pipe reset */
2963 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2964 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2965 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2966 
2967 	/* config me program start addr */
2968 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2969 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2970 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2971 			(me_hdr->ucode_start_addr_hi << 30) |
2972 			(me_hdr->ucode_start_addr_lo >> 2));
2973 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2974 			me_hdr->ucode_start_addr_hi >> 2);
2975 	}
2976 	soc21_grbm_select(adev, 0, 0, 0, 0);
2977 
2978 	/* reset me pipe */
2979 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2980 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2981 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2982 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2983 
2984 	/* clear me pipe reset */
2985 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2986 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2987 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2988 
2989 	/* config mec program start addr */
2990 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2991 		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2992 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2993 					mec_hdr->ucode_start_addr_lo >> 2 |
2994 					mec_hdr->ucode_start_addr_hi << 30);
2995 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2996 					mec_hdr->ucode_start_addr_hi >> 2);
2997 	}
2998 	soc21_grbm_select(adev, 0, 0, 0, 0);
2999 
3000 	/* reset mec pipe */
3001 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3002 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
3003 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
3004 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
3005 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
3006 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
3007 
3008 	/* clear mec pipe reset */
3009 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
3010 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
3011 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
3012 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
3013 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
3014 }
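
/*
 * Editorial sketch (not upstream code): the PRGRM_CNTR_START/_HI pairs
 * written above encode a dword-granular ucode start address. A hypothetical
 * helper making the bit packing explicit:
 */
static void __maybe_unused
gfx_v11_0_pack_prgrm_cntr_start(u32 start_addr_lo, u32 start_addr_hi,
				u32 *cntr_start, u32 *cntr_start_hi)
{
	/* low register: addr_lo in dword units, top two bits from addr_hi */
	*cntr_start = (start_addr_hi << 30) | (start_addr_lo >> 2);
	/* high register: the remaining upper bits, also dword-granular */
	*cntr_start_hi = start_addr_hi >> 2;
}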
3015 
3016 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
3017 {
3018 	uint32_t cp_status;
3019 	uint32_t bootload_status;
3020 	int i, r;
3021 	uint64_t addr, addr2;
3022 
3023 	for (i = 0; i < adev->usec_timeout; i++) {
3024 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
3025 
3026 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3027 			    IP_VERSION(11, 0, 1) ||
3028 		    amdgpu_ip_version(adev, GC_HWIP, 0) ==
3029 			    IP_VERSION(11, 0, 4) ||
3030 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
3031 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
3032 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
3033 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
3034 			bootload_status = RREG32_SOC15(GC, 0,
3035 					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
3036 		else
3037 			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
3038 
3039 		if ((cp_status == 0) &&
3040 		    (REG_GET_FIELD(bootload_status,
3041 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
3042 			break;
3043 		}
3044 		udelay(1);
3045 	}
3046 
3047 	if (i >= adev->usec_timeout) {
3048 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
3049 		return -ETIMEDOUT;
3050 	}
3051 
3052 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
3053 		if (adev->gfx.rs64_enable) {
3054 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3055 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
3056 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3057 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
3058 			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
3059 			if (r)
3060 				return r;
3061 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3062 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
3063 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3064 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
3065 			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
3066 			if (r)
3067 				return r;
3068 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3069 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
3070 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
3071 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
3072 			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
3073 			if (r)
3074 				return r;
3075 		} else {
3076 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3077 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
3078 			r = gfx_v11_0_config_me_cache(adev, addr);
3079 			if (r)
3080 				return r;
3081 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3082 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
3083 			r = gfx_v11_0_config_pfp_cache(adev, addr);
3084 			if (r)
3085 				return r;
3086 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
3087 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
3088 			r = gfx_v11_0_config_mec_cache(adev, addr);
3089 			if (r)
3090 				return r;
3091 		}
3092 	}
3093 
3094 	return 0;
3095 }
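
/*
 * Editorial sketch (not upstream code): the chained IP-version comparisons
 * above could be collapsed into a predicate; the helper name is
 * hypothetical.
 */
static bool __maybe_unused
gfx_v11_0_uses_rlcs_bootload_status_11_0_1(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		/* these parts expose the status at the _gc_11_0_1 offset */
		return true;
	default:
		return false;
	}
}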
3096 
3097 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3098 {
3099 	int i;
3100 	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3101 
3102 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3103 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3104 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3105 
3106 	for (i = 0; i < adev->usec_timeout; i++) {
3107 		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
3108 			break;
3109 		udelay(1);
3110 	}
3111 
3112 	if (i >= adev->usec_timeout)
3113 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
3114 
3115 	return 0;
3116 }
3117 
3118 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
3119 {
3120 	int r;
3121 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3122 	const __le32 *fw_data;
3123 	unsigned i, fw_size;
3124 
3125 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3126 		adev->gfx.pfp_fw->data;
3127 
3128 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3129 
3130 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3131 		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3132 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
3133 
3134 	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
3135 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3136 				      &adev->gfx.pfp.pfp_fw_obj,
3137 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3138 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3139 	if (r) {
3140 		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
3141 		gfx_v11_0_pfp_fini(adev);
3142 		return r;
3143 	}
3144 
3145 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
3146 
3147 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3148 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3149 
3150 	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
3151 
3152 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
3153 
3154 	for (i = 0; i < pfp_hdr->jt_size; i++)
3155 		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
3156 			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
3157 
3158 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3159 
3160 	return 0;
3161 }
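
/*
 * Editorial sketch (not upstream code): CP_HYP_PFP_UCODE_ADDR/_DATA behave
 * as an index/data pair: ADDR is zeroed, each DATA write stores one
 * jump-table dword, and ADDR is finally left at the firmware version, as
 * the parts expect. A generic sketch of the idiom, with the register
 * offsets as parameters (purely illustrative):
 */
static void __maybe_unused
gfx_v11_0_upload_jt(struct amdgpu_device *adev, u32 addr_reg, u32 data_reg,
		    const __le32 *jt, u32 jt_size, u32 fw_version)
{
	u32 i;

	WREG32(addr_reg, 0);		/* rewind the write index */
	for (i = 0; i < jt_size; i++)
		WREG32(data_reg, le32_to_cpup(jt + i));
	WREG32(addr_reg, fw_version);
}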
3162 
3163 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
3164 {
3165 	int r;
3166 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
3167 	const __le32 *fw_ucode, *fw_data;
3168 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3169 	uint32_t tmp;
3170 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3171 
3172 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
3173 		adev->gfx.pfp_fw->data;
3174 
3175 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3176 
3177 	/* instruction */
3178 	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
3179 		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
3180 	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
3181 	/* data */
3182 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3183 		le32_to_cpu(pfp_hdr->data_offset_bytes));
3184 	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
3185 
3186 	/* 64 KiB alignment */
3187 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3188 				      64 * 1024,
3189 				      AMDGPU_GEM_DOMAIN_VRAM |
3190 				      AMDGPU_GEM_DOMAIN_GTT,
3191 				      &adev->gfx.pfp.pfp_fw_obj,
3192 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3193 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3194 	if (r) {
3195 		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
3196 		gfx_v11_0_pfp_fini(adev);
3197 		return r;
3198 	}
3199 
3200 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3201 				      64 * 1024,
3202 				      AMDGPU_GEM_DOMAIN_VRAM |
3203 				      AMDGPU_GEM_DOMAIN_GTT,
3204 				      &adev->gfx.pfp.pfp_fw_data_obj,
3205 				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
3206 				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
3207 	if (r) {
3208 		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
3209 		gfx_v11_0_pfp_fini(adev);
3210 		return r;
3211 	}
3212 
3213 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
3214 	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
3215 
3216 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3217 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
3218 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3219 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
3220 
3221 	if (amdgpu_emu_mode == 1)
3222 		amdgpu_device_flush_hdp(adev, NULL);
3223 
3224 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
3225 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3226 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
3227 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3228 
3229 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
3230 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
3231 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
3232 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
3233 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
3234 
3235 	/*
3236 	 * Programming any of the CP_PFP_IC_BASE registers
3237 	 * forces invalidation of the PFP L1 I$. Wait for the
3238 	 * invalidation to complete.
3239 	 */
3240 	for (i = 0; i < usec_timeout; i++) {
3241 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3242 		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3243 			INVALIDATE_CACHE_COMPLETE) == 1)
3244 			break;
3245 		udelay(1);
3246 	}
3247 
3248 	if (i >= usec_timeout) {
3249 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3250 		return -EINVAL;
3251 	}
3252 
3253 	/* Prime the L1 instruction caches */
3254 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3255 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
3256 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
3257 	/* Wait for the cache to be primed */
3258 	for (i = 0; i < usec_timeout; i++) {
3259 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3260 		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3261 			ICACHE_PRIMED) == 1)
3262 			break;
3263 		udelay(1);
3264 	}
3265 
3266 	if (i >= usec_timeout) {
3267 		dev_err(adev->dev, "failed to prime instruction cache\n");
3268 		return -EINVAL;
3269 	}
3270 
3271 	mutex_lock(&adev->srbm_mutex);
3272 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3273 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3274 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
3275 			(pfp_hdr->ucode_start_addr_hi << 30) |
3276 			(pfp_hdr->ucode_start_addr_lo >> 2));
3277 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
3278 			pfp_hdr->ucode_start_addr_hi >> 2);
3279 
3280 		/*
3281 		 * Program CP_ME_CNTL to reset the given pipe so that
3282 		 * CP_PFP_PRGRM_CNTR_START takes effect.
3283 		 */
3284 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3285 		if (pipe_id == 0)
3286 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3287 					PFP_PIPE0_RESET, 1);
3288 		else
3289 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3290 					PFP_PIPE1_RESET, 1);
3291 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3292 
3293 		/* Clear the pfp pipe reset bit. */
3294 		if (pipe_id == 0)
3295 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3296 					PFP_PIPE0_RESET, 0);
3297 		else
3298 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3299 					PFP_PIPE1_RESET, 0);
3300 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3301 
3302 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
3303 			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3304 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
3305 			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3306 	}
3307 	soc21_grbm_select(adev, 0, 0, 0, 0);
3308 	mutex_unlock(&adev->srbm_mutex);
3309 
3310 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3311 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3312 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3313 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3314 
3315 	/* Invalidate the data caches */
3316 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3317 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3318 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3319 
3320 	for (i = 0; i < usec_timeout; i++) {
3321 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3322 		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3323 			INVALIDATE_DCACHE_COMPLETE) == 1)
3324 			break;
3325 		udelay(1);
3326 	}
3327 
3328 	if (i >= usec_timeout) {
3329 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3330 		return -EINVAL;
3331 	}
3332 
3333 	return 0;
3334 }
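
/*
 * Editorial note: both RS64 firmware BOs above are created with 64 KiB
 * alignment, presumably to satisfy the granularity of the RS64 IC/DC base
 * registers, and with VRAM | GTT so the allocator may fall back to system
 * memory. A hypothetical sanity check spelling out the assumption:
 */
static bool __maybe_unused gfx_v11_0_fw_bo_aligned(u64 gpu_addr)
{
	return IS_ALIGNED(gpu_addr, 64 * 1024);
}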
3335 
3336 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
3337 {
3338 	int r;
3339 	const struct gfx_firmware_header_v1_0 *me_hdr;
3340 	const __le32 *fw_data;
3341 	unsigned i, fw_size;
3342 
3343 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3344 		adev->gfx.me_fw->data;
3345 
3346 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3347 
3348 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3349 		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3350 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
3351 
3352 	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
3353 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3354 				      &adev->gfx.me.me_fw_obj,
3355 				      &adev->gfx.me.me_fw_gpu_addr,
3356 				      (void **)&adev->gfx.me.me_fw_ptr);
3357 	if (r) {
3358 		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
3359 		gfx_v11_0_me_fini(adev);
3360 		return r;
3361 	}
3362 
3363 	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
3364 
3365 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3366 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3367 
3368 	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
3369 
3370 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
3371 
3372 	for (i = 0; i < me_hdr->jt_size; i++)
3373 		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
3374 			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));
3375 
3376 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
3377 
3378 	return 0;
3379 }
3380 
3381 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
3382 {
3383 	int r;
3384 	const struct gfx_firmware_header_v2_0 *me_hdr;
3385 	const __le32 *fw_ucode, *fw_data;
3386 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3387 	uint32_t tmp;
3388 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3389 
3390 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
3391 		adev->gfx.me_fw->data;
3392 
3393 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3394 
3395 	/* instruction */
3396 	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
3397 		le32_to_cpu(me_hdr->ucode_offset_bytes));
3398 	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
3399 	/* data */
3400 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3401 		le32_to_cpu(me_hdr->data_offset_bytes));
3402 	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
3403 
3404 	/* 64 KiB alignment */
3405 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3406 				      64 * 1024,
3407 				      AMDGPU_GEM_DOMAIN_VRAM |
3408 				      AMDGPU_GEM_DOMAIN_GTT,
3409 				      &adev->gfx.me.me_fw_obj,
3410 				      &adev->gfx.me.me_fw_gpu_addr,
3411 				      (void **)&adev->gfx.me.me_fw_ptr);
3412 	if (r) {
3413 		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
3414 		gfx_v11_0_me_fini(adev);
3415 		return r;
3416 	}
3417 
3418 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3419 				      64 * 1024,
3420 				      AMDGPU_GEM_DOMAIN_VRAM |
3421 				      AMDGPU_GEM_DOMAIN_GTT,
3422 				      &adev->gfx.me.me_fw_data_obj,
3423 				      &adev->gfx.me.me_fw_data_gpu_addr,
3424 				      (void **)&adev->gfx.me.me_fw_data_ptr);
3425 	if (r) {
3426 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
3427 		gfx_v11_0_me_fini(adev);
3428 		return r;
3429 	}
3430 
3431 	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
3432 	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
3433 
3434 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3435 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3436 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3437 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3438 
3439 	if (amdgpu_emu_mode == 1)
3440 		amdgpu_device_flush_hdp(adev, NULL);
3441 
3442 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3443 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3444 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3445 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3446 
3447 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3448 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3449 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3450 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3451 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3452 
3453 	/*
3454 	 * Programming any of the CP_ME_IC_BASE registers
3455 	 * forces invalidation of the ME L1 I$. Wait for the
3456 	 * invalidation to complete.
3457 	 */
3458 	for (i = 0; i < usec_timeout; i++) {
3459 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3460 		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3461 			INVALIDATE_CACHE_COMPLETE) == 1)
3462 			break;
3463 		udelay(1);
3464 	}
3465 
3466 	if (i >= usec_timeout) {
3467 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3468 		return -EINVAL;
3469 	}
3470 
3471 	/* Prime the instruction caches */
3472 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3473 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3474 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3475 
3476 	/* Wait for the instruction cache to be primed */
3477 	for (i = 0; i < usec_timeout; i++) {
3478 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3479 		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3480 			ICACHE_PRIMED) == 1)
3481 			break;
3482 		udelay(1);
3483 	}
3484 
3485 	if (i >= usec_timeout) {
3486 		dev_err(adev->dev, "failed to prime instruction cache\n");
3487 		return -EINVAL;
3488 	}
3489 
3490 	mutex_lock(&adev->srbm_mutex);
3491 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3492 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3493 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3494 			(me_hdr->ucode_start_addr_hi << 30) |
3495 			(me_hdr->ucode_start_addr_lo >> 2));
3496 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3497 			me_hdr->ucode_start_addr_hi >> 2);
3498 
3499 		/*
3500 		 * Program CP_ME_CNTL to reset the given pipe so that
3501 		 * CP_ME_PRGRM_CNTR_START takes effect.
3502 		 */
3503 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3504 		if (pipe_id == 0)
3505 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3506 					ME_PIPE0_RESET, 1);
3507 		else
3508 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3509 					ME_PIPE1_RESET, 1);
3510 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3511 
3512 		/* Clear the me pipe reset bit. */
3513 		if (pipe_id == 0)
3514 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3515 					ME_PIPE0_RESET, 0);
3516 		else
3517 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3518 					ME_PIPE1_RESET, 0);
3519 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3520 
3521 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3522 			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3523 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3524 			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3525 	}
3526 	soc21_grbm_select(adev, 0, 0, 0, 0);
3527 	mutex_unlock(&adev->srbm_mutex);
3528 
3529 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3530 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3531 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3532 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3533 
3534 	/* Invalidate the data caches */
3535 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3536 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3537 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3538 
3539 	for (i = 0; i < usec_timeout; i++) {
3540 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3541 		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3542 			INVALIDATE_DCACHE_COMPLETE) == 1)
3543 			break;
3544 		udelay(1);
3545 	}
3546 
3547 	if (i >= usec_timeout) {
3548 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3549 		return -EINVAL;
3550 	}
3551 
3552 	return 0;
3553 }
3554 
3555 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3556 {
3557 	int r;
3558 
3559 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3560 		return -EINVAL;
3561 
3562 	gfx_v11_0_cp_gfx_enable(adev, false);
3563 
3564 	if (adev->gfx.rs64_enable)
3565 		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3566 	else
3567 		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3568 	if (r) {
3569 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3570 		return r;
3571 	}
3572 
3573 	if (adev->gfx.rs64_enable)
3574 		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3575 	else
3576 		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3577 	if (r) {
3578 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3579 		return r;
3580 	}
3581 
3582 	return 0;
3583 }
3584 
3585 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3586 {
3587 	struct amdgpu_ring *ring;
3588 	const struct cs_section_def *sect = NULL;
3589 	const struct cs_extent_def *ext = NULL;
3590 	int r, i;
3591 	int ctx_reg_offset;
3592 
3593 	/* init the CP */
3594 	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3595 		     adev->gfx.config.max_hw_contexts - 1);
3596 	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3597 
3598 	if (!amdgpu_async_gfx_ring)
3599 		gfx_v11_0_cp_gfx_enable(adev, true);
3600 
3601 	ring = &adev->gfx.gfx_ring[0];
3602 	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3603 	if (r) {
3604 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3605 		return r;
3606 	}
3607 
3608 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3609 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3610 
3611 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3612 	amdgpu_ring_write(ring, 0x80000000);
3613 	amdgpu_ring_write(ring, 0x80000000);
3614 
3615 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3616 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3617 			if (sect->id == SECT_CONTEXT) {
3618 				amdgpu_ring_write(ring,
3619 						  PACKET3(PACKET3_SET_CONTEXT_REG,
3620 							  ext->reg_count));
3621 				amdgpu_ring_write(ring, ext->reg_index -
3622 						  PACKET3_SET_CONTEXT_REG_START);
3623 				for (i = 0; i < ext->reg_count; i++)
3624 					amdgpu_ring_write(ring, ext->extent[i]);
3625 			}
3626 		}
3627 	}
3628 
3629 	ctx_reg_offset =
3630 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3631 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3632 	amdgpu_ring_write(ring, ctx_reg_offset);
3633 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3634 
3635 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3636 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3637 
3638 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3639 	amdgpu_ring_write(ring, 0);
3640 
3641 	amdgpu_ring_commit(ring);
3642 
3643 	/* submit cs packet to copy state 0 to next available state */
3644 	if (adev->gfx.num_gfx_rings > 1) {
3645 		/* a maximum of 2 gfx rings is supported */
3646 		ring = &adev->gfx.gfx_ring[1];
3647 		r = amdgpu_ring_alloc(ring, 2);
3648 		if (r) {
3649 			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3650 			return r;
3651 		}
3652 
3653 		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3654 		amdgpu_ring_write(ring, 0);
3655 
3656 		amdgpu_ring_commit(ring);
3657 	}
3658 	return 0;
3659 }
3660 
3661 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3662 					 CP_PIPE_ID pipe)
3663 {
3664 	u32 tmp;
3665 
3666 	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3667 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3668 
3669 	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3670 }
3671 
3672 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3673 					  struct amdgpu_ring *ring)
3674 {
3675 	u32 tmp;
3676 
3677 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3678 	if (ring->use_doorbell) {
3679 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3680 				    DOORBELL_OFFSET, ring->doorbell_index);
3681 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3682 				    DOORBELL_EN, 1);
3683 	} else {
3684 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3685 				    DOORBELL_EN, 0);
3686 	}
3687 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3688 
3689 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3690 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
3691 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3692 
3693 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3694 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3695 }
3696 
3697 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3698 {
3699 	struct amdgpu_ring *ring;
3700 	u32 tmp;
3701 	u32 rb_bufsz;
3702 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3703 
3704 	/* Set the write pointer delay */
3705 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3706 
3707 	/* set the RB to use vmid 0 */
3708 	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3709 
3710 	/* Init gfx ring 0 for pipe 0 */
3711 	mutex_lock(&adev->srbm_mutex);
3712 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3713 
3714 	/* Set ring buffer size */
3715 	ring = &adev->gfx.gfx_ring[0];
3716 	rb_bufsz = order_base_2(ring->ring_size / 8);
3717 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3718 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3719 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3720 
3721 	/* Initialize the ring buffer's write pointers */
3722 	ring->wptr = 0;
3723 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3724 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3725 
3726 	/* set the wb address whether it's enabled or not */
3727 	rptr_addr = ring->rptr_gpu_addr;
3728 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3729 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3730 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3731 
3732 	wptr_gpu_addr = ring->wptr_gpu_addr;
3733 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3734 		     lower_32_bits(wptr_gpu_addr));
3735 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3736 		     upper_32_bits(wptr_gpu_addr));
3737 
3738 	mdelay(1);
3739 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3740 
3741 	rb_addr = ring->gpu_addr >> 8;
3742 	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3743 	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3744 
3745 	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3746 
3747 	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3748 	mutex_unlock(&adev->srbm_mutex);
3749 
3750 	/* Init gfx ring 1 for pipe 1 */
3751 	if (adev->gfx.num_gfx_rings > 1) {
3752 		mutex_lock(&adev->srbm_mutex);
3753 		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3754 		/* a maximum of 2 gfx rings is supported */
3755 		ring = &adev->gfx.gfx_ring[1];
3756 		rb_bufsz = order_base_2(ring->ring_size / 8);
3757 		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3758 		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3759 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3760 		/* Initialize the ring buffer's write pointers */
3761 		ring->wptr = 0;
3762 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3763 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3764 		/* Set the wb address whether it's enabled or not */
3765 		rptr_addr = ring->rptr_gpu_addr;
3766 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3767 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3768 			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3769 		wptr_gpu_addr = ring->wptr_gpu_addr;
3770 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3771 			     lower_32_bits(wptr_gpu_addr));
3772 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3773 			     upper_32_bits(wptr_gpu_addr));
3774 
3775 		mdelay(1);
3776 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3777 
3778 		rb_addr = ring->gpu_addr >> 8;
3779 		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3780 		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3781 		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3782 
3783 		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3784 		mutex_unlock(&adev->srbm_mutex);
3785 	}
3786 	/* Switch to pipe 0 */
3787 	mutex_lock(&adev->srbm_mutex);
3788 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3789 	mutex_unlock(&adev->srbm_mutex);
3790 
3791 	/* start the ring */
3792 	gfx_v11_0_cp_gfx_start(adev);
3793 
3794 	return 0;
3795 }
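
/*
 * Editorial note: CP_RB0_CNTL.RB_BUFSZ encodes the ring size as
 * 2^(RB_BUFSZ + 1) dwords, which is why it is derived from ring_size / 8
 * above. Worked example (size hypothetical): a 64 KiB ring gives
 * order_base_2(65536 / 8) = 13, i.e. 2^14 = 16384 dwords = 64 KiB.
 */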
3796 
3797 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3798 {
3799 	u32 data;
3800 
3801 	if (adev->gfx.rs64_enable) {
3802 		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3803 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3804 							 enable ? 0 : 1);
3805 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3806 							 enable ? 0 : 1);
3807 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3808 							 enable ? 0 : 1);
3809 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3810 							 enable ? 0 : 1);
3811 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3812 							 enable ? 0 : 1);
3813 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3814 							 enable ? 1 : 0);
3815 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3816 							 enable ? 1 : 0);
3817 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3818 							 enable ? 1 : 0);
3819 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3820 							 enable ? 1 : 0);
3821 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3822 							 enable ? 0 : 1);
3823 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3824 	} else {
3825 		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3826 
3827 		if (enable) {
3828 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3829 			if (!adev->enable_mes_kiq)
3830 				data = REG_SET_FIELD(data, CP_MEC_CNTL,
3831 						     MEC_ME2_HALT, 0);
3832 		} else {
3833 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3834 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3835 		}
3836 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3837 	}
3838 
3839 	udelay(50);
3840 }
3841 
3842 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3843 {
3844 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3845 	const __le32 *fw_data;
3846 	unsigned i, fw_size;
3847 	u32 *fw = NULL;
3848 	int r;
3849 
3850 	if (!adev->gfx.mec_fw)
3851 		return -EINVAL;
3852 
3853 	gfx_v11_0_cp_compute_enable(adev, false);
3854 
3855 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3856 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3857 
3858 	fw_data = (const __le32 *)
3859 		(adev->gfx.mec_fw->data +
3860 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3861 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3862 
3863 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3864 					  PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3865 					  &adev->gfx.mec.mec_fw_obj,
3866 					  &adev->gfx.mec.mec_fw_gpu_addr,
3867 					  (void **)&fw);
3868 	if (r) {
3869 		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3870 		gfx_v11_0_mec_fini(adev);
3871 		return r;
3872 	}
3873 
3874 	memcpy(fw, fw_data, fw_size);
3875 
3876 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3877 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3878 
3879 	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3880 
3881 	/* MEC1 */
3882 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3883 
3884 	for (i = 0; i < mec_hdr->jt_size; i++)
3885 		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3886 			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3887 
3888 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3889 
3890 	return 0;
3891 }
3892 
3893 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3894 {
3895 	const struct gfx_firmware_header_v2_0 *mec_hdr;
3896 	const __le32 *fw_ucode, *fw_data;
3897 	u32 tmp, fw_ucode_size, fw_data_size;
3898 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3899 	u32 *fw_ucode_ptr, *fw_data_ptr;
3900 	int r;
3901 
3902 	if (!adev->gfx.mec_fw)
3903 		return -EINVAL;
3904 
3905 	gfx_v11_0_cp_compute_enable(adev, false);
3906 
3907 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3908 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3909 
3910 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3911 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
3912 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3913 
3914 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3915 				le32_to_cpu(mec_hdr->data_offset_bytes));
3916 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3917 
3918 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3919 				      64 * 1024,
3920 				      AMDGPU_GEM_DOMAIN_VRAM |
3921 				      AMDGPU_GEM_DOMAIN_GTT,
3922 				      &adev->gfx.mec.mec_fw_obj,
3923 				      &adev->gfx.mec.mec_fw_gpu_addr,
3924 				      (void **)&fw_ucode_ptr);
3925 	if (r) {
3926 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3927 		gfx_v11_0_mec_fini(adev);
3928 		return r;
3929 	}
3930 
3931 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3932 				      64 * 1024,
3933 				      AMDGPU_GEM_DOMAIN_VRAM |
3934 				      AMDGPU_GEM_DOMAIN_GTT,
3935 				      &adev->gfx.mec.mec_fw_data_obj,
3936 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
3937 				      (void **)&fw_data_ptr);
3938 	if (r) {
3939 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3940 		gfx_v11_0_mec_fini(adev);
3941 		return r;
3942 	}
3943 
3944 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3945 	memcpy(fw_data_ptr, fw_data, fw_data_size);
3946 
3947 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3948 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3949 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3950 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3951 
3952 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3953 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3954 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3955 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3956 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3957 
3958 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3959 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3960 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3961 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3962 
3963 	mutex_lock(&adev->srbm_mutex);
3964 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3965 		soc21_grbm_select(adev, 1, i, 0, 0);
3966 
3967 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3968 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3969 		     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3970 
3971 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3972 					(mec_hdr->ucode_start_addr_hi << 30) |
3973 					(mec_hdr->ucode_start_addr_lo >> 2));
3974 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3975 					mec_hdr->ucode_start_addr_hi >> 2);
3976 
3977 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3978 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3979 		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3980 	}
3981 	soc21_grbm_select(adev, 0, 0, 0, 0);
3982 	mutex_unlock(&adev->srbm_mutex);
3983 
3984 	/* Trigger an invalidation of the MEC data caches */
3985 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3986 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3987 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3988 
3989 	/* Wait for invalidation complete */
3990 	for (i = 0; i < usec_timeout; i++) {
3991 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3992 		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3993 				  INVALIDATE_DCACHE_COMPLETE) == 1)
3994 			break;
3995 		udelay(1);
3996 	}
3997 
3998 	if (i >= usec_timeout) {
3999 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
4000 		return -EINVAL;
4001 	}
4002 
4003 	/* Trigger an invalidation of the L1 instruction caches */
4004 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
4005 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
4006 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
4007 
4008 	/* Wait for invalidation complete */
4009 	for (i = 0; i < usec_timeout; i++) {
4010 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
4011 		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
4012 				  INVALIDATE_CACHE_COMPLETE) == 1)
4013 			break;
4014 		udelay(1);
4015 	}
4016 
4017 	if (i >= usec_timeout) {
4018 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
4019 		return -EINVAL;
4020 	}
4021 
4022 	return 0;
4023 }
4024 
4025 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
4026 {
4027 	uint32_t tmp;
4028 	struct amdgpu_device *adev = ring->adev;
4029 
4030 	/* tell RLC which queue is the KIQ */
4031 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
4032 	tmp &= 0xffffff00;
4033 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4034 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
4035 }
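
/*
 * Editorial note: the low byte of RLC_CP_SCHEDULERS written above packs the
 * KIQ identity as (me << 5) | (pipe << 3) | queue, with bit 7 (the 0x80
 * OR-ed in) marking the entry valid. E.g. (hypothetical values) me = 1,
 * pipe = 0, queue = 0 encodes as 0x20 and is written back as 0xa0.
 */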
4036 
4037 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
4038 {
4039 	/* set graphics engine doorbell range */
4040 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
4041 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
4042 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
4043 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
4044 
4045 	/* set compute engine doorbell range */
4046 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4047 		     (adev->doorbell_index.kiq * 2) << 2);
4048 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4049 		     (adev->doorbell_index.userqueue_end * 2) << 2);
4050 }
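
/*
 * Editorial sketch (not upstream code): adev->doorbell_index.* values
 * count 64-bit doorbell slots, while the RANGE registers take byte
 * offsets; "* 2" converts to dword units and "<< 2" to bytes. A
 * hypothetical helper spelling that out:
 */
static u32 __maybe_unused gfx_v11_0_doorbell_index_to_bytes(u32 index)
{
	/* qword slot -> dword index -> byte offset */
	return (index * 2) << 2;
}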
4051 
4052 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
4053 					   struct v11_gfx_mqd *mqd,
4054 					   struct amdgpu_mqd_prop *prop)
4055 {
4056 	bool priority = false;
4057 	u32 tmp;
4058 
4059 	/* set up default queue priority level
4060 	 * 0x0 = low priority, 0x1 = high priority
4061 	 */
4062 	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
4063 		priority = true;
4064 
4065 	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
4066 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
4067 	mqd->cp_gfx_hqd_queue_priority = tmp;
4068 }
4069 
4070 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
4071 				  struct amdgpu_mqd_prop *prop)
4072 {
4073 	struct v11_gfx_mqd *mqd = m;
4074 	uint64_t hqd_gpu_addr, wb_gpu_addr;
4075 	uint32_t tmp;
4076 	uint32_t rb_bufsz;
4077 
4078 	/* set up gfx hqd wptr */
4079 	mqd->cp_gfx_hqd_wptr = 0;
4080 	mqd->cp_gfx_hqd_wptr_hi = 0;
4081 
4082 	/* set the pointer to the MQD */
4083 	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
4084 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4085 
4086 	/* set up mqd control */
4087 	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
4088 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
4089 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
4090 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
4091 	mqd->cp_gfx_mqd_control = tmp;
4092 
4093 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
4094 	tmp = regCP_GFX_HQD_VMID_DEFAULT;
4095 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
4096 	mqd->cp_gfx_hqd_vmid = 0;
4097 
4098 	/* set up gfx queue priority */
4099 	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
4100 
4101 	/* set up time quantum */
4102 	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
4103 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
4104 	mqd->cp_gfx_hqd_quantum = tmp;
4105 
4106 	/* set up gfx hqd base. this is similar as CP_RB_BASE */
4107 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4108 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
4109 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
4110 
4111 	/* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
4112 	wb_gpu_addr = prop->rptr_gpu_addr;
4113 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
4114 	mqd->cp_gfx_hqd_rptr_addr_hi =
4115 		upper_32_bits(wb_gpu_addr) & 0xffff;
4116 
4117 	/* set up rb_wptr_poll addr */
4118 	wb_gpu_addr = prop->wptr_gpu_addr;
4119 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4120 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4121 
4122 	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
4123 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
4124 	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
4125 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
4126 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
4127 #ifdef __BIG_ENDIAN
4128 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
4129 #endif
4130 	if (prop->tmz_queue)
4131 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
4132 	if (!prop->kernel_queue)
4133 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
4134 	mqd->cp_gfx_hqd_cntl = tmp;
4135 
4136 	/* set up cp_doorbell_control */
4137 	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
4138 	if (prop->use_doorbell) {
4139 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4140 				    DOORBELL_OFFSET, prop->doorbell_index);
4141 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4142 				    DOORBELL_EN, 1);
4143 	} else
4144 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4145 				    DOORBELL_EN, 0);
4146 	mqd->cp_rb_doorbell_control = tmp;
4147 
4148 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4149 	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
4150 
4151 	/* activate the queue */
4152 	mqd->cp_gfx_hqd_active = 1;
4153 
4154 	/* set gfx UQ items */
4155 	mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
4156 	mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
4157 	mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
4158 	mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
4159 	mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
4160 	mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
4161 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
4162 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
4163 
4164 	return 0;
4165 }
4166 
4167 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
4168 {
4169 	struct amdgpu_device *adev = ring->adev;
4170 	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
4171 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
4172 
4173 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4174 		memset((void *)mqd, 0, sizeof(*mqd));
4175 		mutex_lock(&adev->srbm_mutex);
4176 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4177 		amdgpu_ring_init_mqd(ring);
4178 		soc21_grbm_select(adev, 0, 0, 0, 0);
4179 		mutex_unlock(&adev->srbm_mutex);
4180 		if (adev->gfx.me.mqd_backup[mqd_idx])
4181 			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4182 	} else {
4183 		/* restore mqd with the backup copy */
4184 		if (adev->gfx.me.mqd_backup[mqd_idx])
4185 			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
4186 		/* reset the ring */
4187 		ring->wptr = 0;
4188 		*ring->wptr_cpu_addr = 0;
4189 		amdgpu_ring_clear_ring(ring);
4190 	}
4191 
4192 	return 0;
4193 }
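
/*
 * Editorial note: the MQD backup/restore above deliberately uses
 * memcpy_fromio()/memcpy_toio(): ring->mqd_ptr may map VRAM, which has to
 * be accessed as I/O memory rather than with a plain memcpy().
 */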
4194 
4195 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
4196 {
4197 	int r, i;
4198 
4199 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4200 		r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
4201 		if (r)
4202 			return r;
4203 	}
4204 
4205 	r = amdgpu_gfx_enable_kgq(adev, 0);
4206 	if (r)
4207 		return r;
4208 
4209 	return gfx_v11_0_cp_gfx_start(adev);
4210 }
4211 
4212 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
4213 				      struct amdgpu_mqd_prop *prop)
4214 {
4215 	struct v11_compute_mqd *mqd = m;
4216 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4217 	uint32_t tmp;
4218 
4219 	mqd->header = 0xC0310800;
4220 	mqd->compute_pipelinestat_enable = 0x00000001;
4221 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4222 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4223 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4224 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4225 	mqd->compute_misc_reserved = 0x00000007;
4226 
4227 	eop_base_addr = prop->eop_gpu_addr >> 8;
4228 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4229 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4230 
4231 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4232 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
4233 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4234 			(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
4235 
4236 	mqd->cp_hqd_eop_control = tmp;
4237 
4238 	/* enable doorbell? */
4239 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
4240 
4241 	if (prop->use_doorbell) {
4242 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4243 				    DOORBELL_OFFSET, prop->doorbell_index);
4244 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4245 				    DOORBELL_EN, 1);
4246 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4247 				    DOORBELL_SOURCE, 0);
4248 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4249 				    DOORBELL_HIT, 0);
4250 	} else {
4251 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4252 				    DOORBELL_EN, 0);
4253 	}
4254 
4255 	mqd->cp_hqd_pq_doorbell_control = tmp;
4256 
4257 	/* disable the queue if it's active */
4258 	mqd->cp_hqd_dequeue_request = 0;
4259 	mqd->cp_hqd_pq_rptr = 0;
4260 	mqd->cp_hqd_pq_wptr_lo = 0;
4261 	mqd->cp_hqd_pq_wptr_hi = 0;
4262 
4263 	/* set the pointer to the MQD */
4264 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
4265 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4266 
4267 	/* set MQD vmid to 0 */
4268 	tmp = regCP_MQD_CONTROL_DEFAULT;
4269 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4270 	mqd->cp_mqd_control = tmp;
4271 
4272 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4273 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4274 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4275 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4276 
4277 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4278 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
4279 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4280 			    (order_base_2(prop->queue_size / 4) - 1));
4281 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4282 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4283 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
4284 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
4285 			    prop->allow_tunneling);
4286 	if (prop->kernel_queue) {
4287 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4288 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4289 	}
4290 	if (prop->tmz_queue)
4291 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
4292 	mqd->cp_hqd_pq_control = tmp;
4293 
4294 	/* set the wb address whether it's enabled or not */
4295 	wb_gpu_addr = prop->rptr_gpu_addr;
4296 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4297 	mqd->cp_hqd_pq_rptr_report_addr_hi =
4298 		upper_32_bits(wb_gpu_addr) & 0xffff;
4299 
4300 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4301 	wb_gpu_addr = prop->wptr_gpu_addr;
4302 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4303 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4304 
4305 	tmp = 0;
4306 	/* enable the doorbell if requested */
4307 	if (prop->use_doorbell) {
4308 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
4309 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4310 				DOORBELL_OFFSET, prop->doorbell_index);
4311 
4312 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4313 				    DOORBELL_EN, 1);
4314 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4315 				    DOORBELL_SOURCE, 0);
4316 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4317 				    DOORBELL_HIT, 0);
4318 	}
4319 
4320 	mqd->cp_hqd_pq_doorbell_control = tmp;
4321 
4322 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4323 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
4324 
4325 	/* set the vmid for the queue */
4326 	mqd->cp_hqd_vmid = 0;
4327 
4328 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
4329 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
4330 	mqd->cp_hqd_persistent_state = tmp;
4331 
4332 	/* set MIN_IB_AVAIL_SIZE */
4333 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
4334 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4335 	mqd->cp_hqd_ib_control = tmp;
4336 
4337 	/* set static priority for a compute queue/ring */
4338 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
4339 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
4340 
4341 	mqd->cp_hqd_active = prop->hqd_active;
4342 
4343 	/* set the UQ fence address */
4344 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
4345 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
4346 
4347 	return 0;
4348 }
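
/*
 * Editorial note: per the comment above, CP_HQD_EOP_CONTROL.EOP_SIZE
 * encodes 2^(EOP_SIZE + 1) dwords. With GFX11_MEC_HPD_SIZE = 2048 bytes
 * (512 dwords), the value programmed is order_base_2(512) - 1 = 8, and
 * indeed 2^(8 + 1) = 512.
 */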
4349 
4350 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
4351 {
4352 	struct amdgpu_device *adev = ring->adev;
4353 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4354 	int j;
4355 
4356 	/* deactivate the queue */
4357 	if (amdgpu_sriov_vf(adev))
4358 		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
4359 
4360 	/* disable wptr polling */
4361 	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4362 
4363 	/* write the EOP addr */
4364 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
4365 	       mqd->cp_hqd_eop_base_addr_lo);
4366 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
4367 	       mqd->cp_hqd_eop_base_addr_hi);
4368 
4369 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4370 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
4371 	       mqd->cp_hqd_eop_control);
4372 
4373 	/* enable doorbell? */
4374 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4375 	       mqd->cp_hqd_pq_doorbell_control);
4376 
4377 	/* disable the queue if it's active */
4378 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
4379 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
4380 		for (j = 0; j < adev->usec_timeout; j++) {
4381 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
4382 				break;
4383 			udelay(1);
4384 		}
4385 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
4386 		       mqd->cp_hqd_dequeue_request);
4387 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
4388 		       mqd->cp_hqd_pq_rptr);
4389 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4390 		       mqd->cp_hqd_pq_wptr_lo);
4391 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4392 		       mqd->cp_hqd_pq_wptr_hi);
4393 	}
4394 
4395 	/* set the pointer to the MQD */
4396 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
4397 	       mqd->cp_mqd_base_addr_lo);
4398 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
4399 	       mqd->cp_mqd_base_addr_hi);
4400 
4401 	/* set MQD vmid to 0 */
4402 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
4403 	       mqd->cp_mqd_control);
4404 
4405 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4406 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
4407 	       mqd->cp_hqd_pq_base_lo);
4408 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
4409 	       mqd->cp_hqd_pq_base_hi);
4410 
4411 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4412 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
4413 	       mqd->cp_hqd_pq_control);
4414 
4415 	/* set the wb address whether it's enabled or not */
4416 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
4417 		mqd->cp_hqd_pq_rptr_report_addr_lo);
4418 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4419 		mqd->cp_hqd_pq_rptr_report_addr_hi);
4420 
4421 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4422 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
4423 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
4424 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
4425 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
4426 
4427 	/* enable the doorbell if requested */
4428 	if (ring->use_doorbell) {
4429 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4430 			(adev->doorbell_index.kiq * 2) << 2);
4431 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4432 			(adev->doorbell_index.userqueue_end * 2) << 2);
4433 	}
4434 
4435 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4436 	       mqd->cp_hqd_pq_doorbell_control);
4437 
4438 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4439 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4440 	       mqd->cp_hqd_pq_wptr_lo);
4441 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4442 	       mqd->cp_hqd_pq_wptr_hi);
4443 
4444 	/* set the vmid for the queue */
4445 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
4446 
4447 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
4448 	       mqd->cp_hqd_persistent_state);
4449 
4450 	/* activate the queue */
4451 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4452 	       mqd->cp_hqd_active);
4453 
4454 	if (ring->use_doorbell)
4455 		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4456 
4457 	return 0;
4458 }
4459 
4460 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4461 {
4462 	struct amdgpu_device *adev = ring->adev;
4463 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4464 
4465 	gfx_v11_0_kiq_setting(ring);
4466 
4467 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4468 		/* reset MQD to a clean status */
4469 		if (adev->gfx.kiq[0].mqd_backup)
4470 			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4471 
4472 		/* reset ring buffer */
4473 		ring->wptr = 0;
4474 		amdgpu_ring_clear_ring(ring);
4475 
4476 		mutex_lock(&adev->srbm_mutex);
4477 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4478 		gfx_v11_0_kiq_init_register(ring);
4479 		soc21_grbm_select(adev, 0, 0, 0, 0);
4480 		mutex_unlock(&adev->srbm_mutex);
4481 	} else {
4482 		memset((void *)mqd, 0, sizeof(*mqd));
4483 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4484 			amdgpu_ring_clear_ring(ring);
4485 		mutex_lock(&adev->srbm_mutex);
4486 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4487 		amdgpu_ring_init_mqd(ring);
4488 		gfx_v11_0_kiq_init_register(ring);
4489 		soc21_grbm_select(adev, 0, 0, 0, 0);
4490 		mutex_unlock(&adev->srbm_mutex);
4491 
4492 		if (adev->gfx.kiq[0].mqd_backup)
4493 			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4494 	}
4495 
4496 	return 0;
4497 }
4498 
4499 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
4500 {
4501 	struct amdgpu_device *adev = ring->adev;
4502 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4503 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4504 
4505 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4506 		memset((void *)mqd, 0, sizeof(*mqd));
4507 		mutex_lock(&adev->srbm_mutex);
4508 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4509 		amdgpu_ring_init_mqd(ring);
4510 		soc21_grbm_select(adev, 0, 0, 0, 0);
4511 		mutex_unlock(&adev->srbm_mutex);
4512 
4513 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4514 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4515 	} else {
4516 		/* restore MQD to a clean status */
4517 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4518 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4519 		/* reset ring buffer */
4520 		ring->wptr = 0;
4521 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4522 		amdgpu_ring_clear_ring(ring);
4523 	}
4524 
4525 	return 0;
4526 }
4527 
4528 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4529 {
4530 	gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
4531 	return 0;
4532 }
4533 
4534 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4535 {
4536 	int i, r;
4537 
4538 	if (!amdgpu_async_gfx_ring)
4539 		gfx_v11_0_cp_compute_enable(adev, true);
4540 
4541 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4542 		r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
4543 		if (r)
4544 			return r;
4545 	}
4546 
4547 	return amdgpu_gfx_enable_kcq(adev, 0);
4548 }
4549 
4550 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4551 {
4552 	int r, i;
4553 	struct amdgpu_ring *ring;
4554 
4555 	if (!(adev->flags & AMD_IS_APU))
4556 		gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4557 
4558 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4559 		/* legacy firmware loading */
4560 		r = gfx_v11_0_cp_gfx_load_microcode(adev);
4561 		if (r)
4562 			return r;
4563 
4564 		if (adev->gfx.rs64_enable)
4565 			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4566 		else
4567 			r = gfx_v11_0_cp_compute_load_microcode(adev);
4568 		if (r)
4569 			return r;
4570 	}
4571 
4572 	gfx_v11_0_cp_set_doorbell_range(adev);
4573 
4574 	if (amdgpu_async_gfx_ring) {
4575 		gfx_v11_0_cp_compute_enable(adev, true);
4576 		gfx_v11_0_cp_gfx_enable(adev, true);
4577 	}
4578 
4579 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4580 		r = amdgpu_mes_kiq_hw_init(adev);
4581 	else
4582 		r = gfx_v11_0_kiq_resume(adev);
4583 	if (r)
4584 		return r;
4585 
4586 	r = gfx_v11_0_kcq_resume(adev);
4587 	if (r)
4588 		return r;
4589 
4590 	if (!amdgpu_async_gfx_ring) {
4591 		r = gfx_v11_0_cp_gfx_resume(adev);
4592 		if (r)
4593 			return r;
4594 	} else {
4595 		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4596 		if (r)
4597 			return r;
4598 	}
4599 
4600 	if (adev->gfx.disable_kq) {
4601 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4602 			ring = &adev->gfx.gfx_ring[i];
4603 			/* we don't want to set ring->ready */
4604 			r = amdgpu_ring_test_ring(ring);
4605 			if (r)
4606 				return r;
4607 		}
4608 		if (amdgpu_async_gfx_ring)
4609 			amdgpu_gfx_disable_kgq(adev, 0);
4610 	} else {
4611 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4612 			ring = &adev->gfx.gfx_ring[i];
4613 			r = amdgpu_ring_test_helper(ring);
4614 			if (r)
4615 				return r;
4616 		}
4617 	}
4618 
4619 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4620 		ring = &adev->gfx.compute_ring[i];
4621 		r = amdgpu_ring_test_helper(ring);
4622 		if (r)
4623 			return r;
4624 	}
4625 
4626 	return 0;
4627 }
4628 
4629 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4630 {
4631 	gfx_v11_0_cp_gfx_enable(adev, enable);
4632 	gfx_v11_0_cp_compute_enable(adev, enable);
4633 }
4634 
4635 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4636 {
4637 	int r;
4638 	bool value;
4639 
4640 	r = adev->gfxhub.funcs->gart_enable(adev);
4641 	if (r)
4642 		return r;
4643 
4644 	amdgpu_device_flush_hdp(adev, NULL);
4645 
4646 	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
4647 
4648 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4649 	/* TODO: investigate why this and the hdp flush above are needed;
4650 	 * are we missing a flush somewhere else? */
4651 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4652 
4653 	return 0;
4654 }
4655 
4656 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4657 {
4658 	u32 tmp;
4659 
4660 	/* select RS64 */
4661 	if (adev->gfx.rs64_enable) {
4662 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4663 		tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4664 		WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4665 
4666 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4667 		tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4668 		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4669 	}
4670 
4671 	if (amdgpu_emu_mode == 1)
4672 		msleep(100);
4673 }
4674 
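/*
 * Read GB_ADDR_CONFIG and expand its fields into gb_addr_config_fields.
 * Most of the fields are log2 encoded, hence the 1 << REG_GET_FIELD()
 * pattern below.
 */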
4675 static int get_gb_addr_config(struct amdgpu_device *adev)
4676 {
4677 	u32 gb_addr_config;
4678 
4679 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4680 	if (gb_addr_config == 0)
4681 		return -EINVAL;
4682 
4683 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
4684 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4685 
4686 	adev->gfx.config.gb_addr_config = gb_addr_config;
4687 
4688 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4689 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4690 				      GB_ADDR_CONFIG, NUM_PIPES);
4691 
4692 	adev->gfx.config.max_tile_pipes =
4693 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4694 
4695 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4696 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4697 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4698 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4699 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4700 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
4701 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4702 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4703 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4704 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4705 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4706 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4707 
4708 	return 0;
4709 }
4710 
4711 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4712 {
4713 	uint32_t data;
4714 
4715 	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4716 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4717 	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4718 
4719 	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4720 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4721 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4722 }
4723 
4724 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
4725 {
4726 	int r;
4727 	struct amdgpu_device *adev = ip_block->adev;
4728 
4729 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4730 				       adev->gfx.cleaner_shader_ptr);
4731 
4732 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4733 		if (adev->gfx.imu.funcs) {
4734 			/* RLC autoload sequence 1: Program rlc ram */
4735 			if (adev->gfx.imu.funcs->program_rlc_ram)
4736 				adev->gfx.imu.funcs->program_rlc_ram(adev);
4737 			/* rlc autoload firmware */
4738 			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4739 			if (r)
4740 				return r;
4741 		}
4742 	} else {
4743 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4744 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4745 				if (adev->gfx.imu.funcs->load_microcode)
4746 					adev->gfx.imu.funcs->load_microcode(adev);
4747 				if (adev->gfx.imu.funcs->setup_imu)
4748 					adev->gfx.imu.funcs->setup_imu(adev);
4749 				if (adev->gfx.imu.funcs->start_imu)
4750 					adev->gfx.imu.funcs->start_imu(adev);
4751 			}
4752 
4753 			/* disable gpa mode in backdoor loading */
4754 			gfx_v11_0_disable_gpa_mode(adev);
4755 		}
4756 	}
4757 
4758 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4759 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4760 		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4761 		if (r) {
4762 			dev_err(adev->dev, "(%d) failed to wait for rlc autoload to complete\n", r);
4763 			return r;
4764 		}
4765 	}
4766 
4767 	adev->gfx.is_poweron = true;
4768 
4769 	if (get_gb_addr_config(adev))
4770 		DRM_WARN("Invalid gb_addr_config!\n");
4771 
4772 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4773 	    adev->gfx.rs64_enable)
4774 		gfx_v11_0_config_gfx_rs64(adev);
4775 
4776 	r = gfx_v11_0_gfxhub_enable(adev);
4777 	if (r)
4778 		return r;
4779 
4780 	if (!amdgpu_emu_mode)
4781 		gfx_v11_0_init_golden_registers(adev);
4782 
4783 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4784 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4785 		/*
4786 		 * For gfx 11, RLC firmware loading relies on the SMU firmware
4787 		 * being loaded first, so for the direct loading type the SMC
4788 		 * ucode has to be loaded here before the RLC.
4789 		 */
4790 		r = amdgpu_pm_load_smu_firmware(adev, NULL);
4791 		if (r)
4792 			return r;
4793 	}
4794 
4795 	gfx_v11_0_constants_init(adev);
4796 
4797 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4798 		gfx_v11_0_select_cp_fw_arch(adev);
4799 
4800 	if (adev->nbio.funcs->gc_doorbell_init)
4801 		adev->nbio.funcs->gc_doorbell_init(adev);
4802 
4803 	r = gfx_v11_0_rlc_resume(adev);
4804 	if (r)
4805 		return r;
4806 
4807 	/*
4808 	 * golden register init and rlc resume may override some registers,
4809 	 * so reconfigure them here
4810 	 */
4811 	gfx_v11_0_tcp_harvest(adev);
4812 
4813 	r = gfx_v11_0_cp_resume(adev);
4814 	if (r)
4815 		return r;
4816 
4817 	/* get IMU version from HW if it's not set */
4818 	if (!adev->gfx.imu_fw_version)
4819 		adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);
4820 
4821 	return r;
4822 }
4823 
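/*
 * Enable or disable EOP interrupts on every gfx (ME) and compute (MEC)
 * pipe when user queue support is present, as user queues may be
 * scheduled on any pipe.
 */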
4824 static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
4825 					      bool enable)
4826 {
4827 	unsigned int irq_type;
4828 	int m, p, r;
4829 
4830 	if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
4831 		for (m = 0; m < adev->gfx.me.num_me; m++) {
4832 			for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
4833 				irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
4834 				if (enable)
4835 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
4836 							   irq_type);
4837 				else
4838 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
4839 							   irq_type);
4840 				if (r)
4841 					return r;
4842 			}
4843 		}
4844 	}
4845 
4846 	if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
4847 		for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
4848 			for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
4849 				irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4850 					+ (m * adev->gfx.mec.num_pipe_per_mec)
4851 					+ p;
4852 				if (enable)
4853 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
4854 							   irq_type);
4855 				else
4856 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
4857 							   irq_type);
4858 				if (r)
4859 					return r;
4860 			}
4861 		}
4862 	}
4863 
4864 	return 0;
4865 }
4866 
4867 static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
4868 {
4869 	struct amdgpu_device *adev = ip_block->adev;
4870 
4871 	cancel_delayed_work_sync(&adev->gfx.idle_work);
4872 
4873 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4874 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4875 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4876 	gfx_v11_0_set_userq_eop_interrupts(adev, false);
4877 
4878 	if (!adev->no_hw_access) {
4879 		if (amdgpu_async_gfx_ring &&
4880 		    !adev->gfx.disable_kq) {
4881 			if (amdgpu_gfx_disable_kgq(adev, 0))
4882 				DRM_ERROR("KGQ disable failed\n");
4883 		}
4884 
4885 		if (amdgpu_gfx_disable_kcq(adev, 0))
4886 			DRM_ERROR("KCQ disable failed\n");
4887 
4888 		amdgpu_mes_kiq_hw_fini(adev);
4889 	}
4890 
4891 	if (amdgpu_sriov_vf(adev))
4892 		/* Remove the steps disabling CPG and clearing KIQ position,
4893 		 * so that CP could perform IDLE-SAVE during switch. Those
4894 		 * steps are necessary to avoid a DMAR error in gfx9, which is
4895 		 * not reproduced on gfx11.
4896 		 */
4897 		return 0;
4898 
4899 	gfx_v11_0_cp_enable(adev, false);
4900 	gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4901 
4902 	adev->gfxhub.funcs->gart_disable(adev);
4903 
4904 	adev->gfx.is_poweron = false;
4905 
4906 	return 0;
4907 }
4908 
4909 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
4910 {
4911 	return gfx_v11_0_hw_fini(ip_block);
4912 }
4913 
4914 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
4915 {
4916 	return gfx_v11_0_hw_init(ip_block);
4917 }
4918 
4919 static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
4920 {
4921 	struct amdgpu_device *adev = ip_block->adev;
4922 
4923 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4924 				GRBM_STATUS, GUI_ACTIVE))
4925 		return false;
4926 	else
4927 		return true;
4928 }
4929 
4930 static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4931 {
4932 	unsigned i;
4933 	u32 tmp;
4934 	struct amdgpu_device *adev = ip_block->adev;
4935 
4936 	for (i = 0; i < adev->usec_timeout; i++) {
4937 		/* read GRBM_STATUS */
4938 		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4939 			GRBM_STATUS__GUI_ACTIVE_MASK;
4940 
4941 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4942 			return 0;
4943 		udelay(1);
4944 	}
4945 	return -ETIMEDOUT;
4946 }
4947 
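/*
 * Acquire (req == true) or release (req == false) the CP_GFX_INDEX_MUTEX
 * hardware mutex. A request is made by writing REQUEST/CLIENTID and then
 * reading the register back: on a successful acquire the value read
 * matches what was written, while on release the value no longer matches
 * the locked pattern (i.e. the mutex is unlocked or held by firmware).
 */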
4948 int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
4949 				      bool req)
4950 {
4951 	u32 i, tmp, val;
4952 
4953 	for (i = 0; i < adev->usec_timeout; i++) {
4954 		/* Request with MeId=2, PipeId=0 */
4955 		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
4956 		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
4957 		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
4958 
4959 		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
4960 		if (req) {
4961 			if (val == tmp)
4962 				break;
4963 		} else {
4964 			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
4965 					    REQUEST, 1);
4966 
4967 			/* unlocked or locked by firmware */
4968 			if (val != tmp)
4969 				break;
4970 		}
4971 		udelay(1);
4972 	}
4973 
4974 	if (i >= adev->usec_timeout)
4975 		return -EINVAL;
4976 
4977 	return 0;
4978 }
4979 
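/*
 * Soft reset the gfx block: mask the CP interrupts, request a dequeue on
 * every compute and gfx queue, reset all VMIDs except VMID 0 through
 * CP_VMID_RESET (guarded by the gfx index mutex), wait for the HQDs to
 * go idle, pulse the CP/GFX soft reset bits in GRBM_SOFT_RESET, restore
 * the interrupts and finally bring the CP back up.
 */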
4980 static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
4981 {
4982 	u32 grbm_soft_reset = 0;
4983 	u32 tmp;
4984 	int r, i, j, k;
4985 	struct amdgpu_device *adev = ip_block->adev;
4986 
4987 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4988 
4989 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4990 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4991 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4992 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4993 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4994 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4995 
4996 	mutex_lock(&adev->srbm_mutex);
4997 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4998 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4999 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
5000 				soc21_grbm_select(adev, i, k, j, 0);
5001 
5002 				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
5003 				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
5004 			}
5005 		}
5006 	}
5007 	for (i = 0; i < adev->gfx.me.num_me; ++i) {
5008 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
5009 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
5010 				soc21_grbm_select(adev, i, k, j, 0);
5011 
5012 				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
5013 			}
5014 		}
5015 	}
5016 	soc21_grbm_select(adev, 0, 0, 0, 0);
5017 	mutex_unlock(&adev->srbm_mutex);
5018 
5019 	/* Try to acquire the gfx mutex before accessing CP_VMID_RESET */
5020 	mutex_lock(&adev->gfx.reset_sem_mutex);
5021 	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
5022 	if (r) {
5023 		mutex_unlock(&adev->gfx.reset_sem_mutex);
5024 		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
5025 		return r;
5026 	}
5027 
5028 	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
5029 
5030 	/* Read the CP_VMID_RESET register three times to give
5031 	 * GFX_HQD_ACTIVE sufficient time to reach 0. */
5032 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5033 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5034 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
5035 
5036 	/* release the gfx mutex */
5037 	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
5038 	mutex_unlock(&adev->gfx.reset_sem_mutex);
5039 	if (r) {
5040 		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
5041 		return r;
5042 	}
5043 
5044 	for (i = 0; i < adev->usec_timeout; i++) {
5045 		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
5046 		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
5047 			break;
5048 		udelay(1);
5049 	}
5050 	if (i >= adev->usec_timeout) {
5051 		dev_err(adev->dev, "Failed to wait for all pipes to become idle\n");
5052 		return -EINVAL;
5053 	}
5054 
5055 	/**********  trigger soft reset  ***********/
5056 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
5057 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5058 					SOFT_RESET_CP, 1);
5059 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5060 					SOFT_RESET_GFX, 1);
5061 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5062 					SOFT_RESET_CPF, 1);
5063 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5064 					SOFT_RESET_CPC, 1);
5065 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5066 					SOFT_RESET_CPG, 1);
5067 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
5068 	/**********  exit soft reset  ***********/
5069 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
5070 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5071 					SOFT_RESET_CP, 0);
5072 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5073 					SOFT_RESET_GFX, 0);
5074 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5075 					SOFT_RESET_CPF, 0);
5076 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5077 					SOFT_RESET_CPC, 0);
5078 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
5079 					SOFT_RESET_CPG, 0);
5080 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
5081 
5082 	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
5083 	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
5084 	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
5085 
5086 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
5087 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
5088 
5089 	for (i = 0; i < adev->usec_timeout; i++) {
5090 		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
5091 			break;
5092 		udelay(1);
5093 	}
5094 	if (i >= adev->usec_timeout) {
5095 		dev_err(adev->dev, "Failed to wait for CP_VMID_RESET to clear\n");
5096 		return -EINVAL;
5097 	}
5098 
5099 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5100 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5101 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5102 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5103 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5104 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
5105 
5106 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5107 
5108 	return gfx_v11_0_cp_resume(adev);
5109 }
5110 
5111 static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
5112 {
5113 	int i, r;
5114 	struct amdgpu_device *adev = ip_block->adev;
5115 	struct amdgpu_ring *ring;
5116 	long tmo = msecs_to_jiffies(1000);
5117 
5118 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5119 		ring = &adev->gfx.gfx_ring[i];
5120 		r = amdgpu_ring_test_ib(ring, tmo);
5121 		if (r)
5122 			return true;
5123 	}
5124 
5125 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5126 		ring = &adev->gfx.compute_ring[i];
5127 		r = amdgpu_ring_test_ib(ring, tmo);
5128 		if (r)
5129 			return true;
5130 	}
5131 
5132 	return false;
5133 }
5134 
5135 static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
5136 {
5137 	struct amdgpu_device *adev = ip_block->adev;
5138 	/*
5139 	 * A GFX soft reset also impacts MES, so MES must be resumed afterwards.
5140 	 */
5141 	return amdgpu_mes_resume(adev);
5142 }
5143 
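/*
 * Sample the 64-bit GPU clock counter. The high dword is read before
 * and after the low dword so that a rollover between the two reads can
 * be detected and the low dword re-read.
 */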
5144 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5145 {
5146 	uint64_t clock;
5147 	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
5148 
5149 	if (amdgpu_sriov_vf(adev)) {
5150 		amdgpu_gfx_off_ctrl(adev, false);
5151 		mutex_lock(&adev->gfx.gpu_clock_mutex);
5152 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5153 		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5154 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5155 		if (clock_counter_hi_pre != clock_counter_hi_after)
5156 			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5157 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
5158 		amdgpu_gfx_off_ctrl(adev, true);
5159 	} else {
5160 		preempt_disable();
5161 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5162 		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5163 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5164 		if (clock_counter_hi_pre != clock_counter_hi_after)
5165 			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5166 		preempt_enable();
5167 	}
5168 	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
5169 
5170 	return clock;
5171 }
5172 
5173 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5174 					   uint32_t vmid,
5175 					   uint32_t gds_base, uint32_t gds_size,
5176 					   uint32_t gws_base, uint32_t gws_size,
5177 					   uint32_t oa_base, uint32_t oa_size)
5178 {
5179 	struct amdgpu_device *adev = ring->adev;
5180 
5181 	/* GDS Base */
5182 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5183 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
5184 				    gds_base);
5185 
5186 	/* GDS Size */
5187 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5188 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
5189 				    gds_size);
5190 
5191 	/* GWS */
5192 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5193 				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
5194 				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5195 
5196 	/* OA */
5197 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5198 				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
5199 				    (1 << (oa_size + oa_base)) - (1 << oa_base));
5200 }
5201 
5202 static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
5203 {
5204 	struct amdgpu_device *adev = ip_block->adev;
5205 
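	/*
	 * amdgpu_user_queue: -1/0 (default) selects kernel queues only,
	 * 1 enables both kernel and user queues, 2 selects user queues only.
	 */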
5206 	switch (amdgpu_user_queue) {
5207 	case -1:
5208 	case 0:
5209 	default:
5210 		adev->gfx.disable_kq = false;
5211 		adev->gfx.disable_uq = true;
5212 		break;
5213 	case 1:
5214 		adev->gfx.disable_kq = false;
5215 		adev->gfx.disable_uq = false;
5216 		break;
5217 	case 2:
5218 		adev->gfx.disable_kq = true;
5219 		adev->gfx.disable_uq = false;
5220 		break;
5221 	}
5222 
5223 	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
5224 
5225 	if (adev->gfx.disable_kq) {
5226 		/* We need one GFX ring temporarily to set up
5227 		 * the clear state.
5228 		 */
5229 		adev->gfx.num_gfx_rings = 1;
5230 		adev->gfx.num_compute_rings = 0;
5231 	} else {
5232 		adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
5233 		adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5234 						  AMDGPU_MAX_COMPUTE_RINGS);
5235 	}
5236 
5237 	gfx_v11_0_set_kiq_pm4_funcs(adev);
5238 	gfx_v11_0_set_ring_funcs(adev);
5239 	gfx_v11_0_set_irq_funcs(adev);
5240 	gfx_v11_0_set_gds_init(adev);
5241 	gfx_v11_0_set_rlc_funcs(adev);
5242 	gfx_v11_0_set_mqd_funcs(adev);
5243 	gfx_v11_0_set_imu_funcs(adev);
5244 
5245 	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
5246 
5247 	return gfx_v11_0_init_microcode(adev);
5248 }
5249 
5250 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
5251 {
5252 	struct amdgpu_device *adev = ip_block->adev;
5253 	int r;
5254 
5255 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5256 	if (r)
5257 		return r;
5258 
5259 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5260 	if (r)
5261 		return r;
5262 
5263 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
5264 	if (r)
5265 		return r;
5266 
5267 	r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
5268 	if (r)
5269 		return r;
5270 
5271 	return 0;
5272 }
5273 
5274 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
5275 {
5276 	uint32_t rlc_cntl;
5277 
5278 	/* report whether the RLC is enabled; callers skip RLC access if it is not */
5279 	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
5280 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
5281 }
5282 
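/* Request RLC safe mode and wait for the RLC to acknowledge the command */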
5283 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
5284 {
5285 	uint32_t data;
5286 	unsigned i;
5287 
5288 	data = RLC_SAFE_MODE__CMD_MASK;
5289 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5290 
5291 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
5292 
5293 	/* wait for RLC_SAFE_MODE */
5294 	for (i = 0; i < adev->usec_timeout; i++) {
5295 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
5296 				   RLC_SAFE_MODE, CMD))
5297 			break;
5298 		udelay(1);
5299 	}
5300 }
5301 
5302 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
5303 {
5304 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
5305 }
5306 
5307 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
5308 				      bool enable)
5309 {
5310 	uint32_t def, data;
5311 
5312 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
5313 		return;
5314 
5315 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5316 
5317 	if (enable)
5318 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5319 	else
5320 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5321 
5322 	if (def != data)
5323 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5324 }
5325 
5326 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
5327 				       bool enable)
5328 {
5329 	uint32_t def, data;
5330 
5331 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
5332 		return;
5333 
5334 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5335 
5336 	if (enable)
5337 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5338 	else
5339 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5340 
5341 	if (def != data)
5342 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5343 }
5344 
5345 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
5346 					   bool enable)
5347 {
5348 	uint32_t def, data;
5349 
5350 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
5351 		return;
5352 
5353 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5354 
5355 	if (enable)
5356 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5357 	else
5358 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5359 
5360 	if (def != data)
5361 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5362 }
5363 
5364 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5365 						       bool enable)
5366 {
5367 	uint32_t data, def;
5368 
5369 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
5370 		return;
5371 
5372 	/* It is disabled by HW by default */
5373 	if (enable) {
5374 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5375 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
5376 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5377 
5378 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5379 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5380 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5381 
5382 			if (def != data)
5383 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5384 		}
5385 	} else {
5386 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5387 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5388 
5389 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5390 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5391 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5392 
5393 			if (def != data)
5394 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5395 		}
5396 	}
5397 }
5398 
5399 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5400 						       bool enable)
5401 {
5402 	uint32_t def, data;
5403 
5404 	if (!(adev->cg_flags &
5405 	      (AMD_CG_SUPPORT_GFX_CGCG |
5406 	      AMD_CG_SUPPORT_GFX_CGLS |
5407 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
5408 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
5409 		return;
5410 
5411 	if (enable) {
5412 		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5413 
5414 		/* unset CGCG override */
5415 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5416 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5417 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5418 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5419 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
5420 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5421 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5422 
5423 		/* update CGCG override bits */
5424 		if (def != data)
5425 			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5426 
5427 		/* enable cgcg FSM(0x0000363F) */
5428 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5429 
5430 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5431 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
5432 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5433 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5434 		}
5435 
5436 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5437 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
5438 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5439 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5440 		}
5441 
5442 		if (def != data)
5443 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5444 
5445 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5446 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5447 
5448 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5449 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
5450 			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5451 				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5452 		}
5453 
5454 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5455 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
5456 			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5457 				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5458 		}
5459 
5460 		if (def != data)
5461 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5462 
5463 		/* set IDLE_POLL_COUNT(0x00900100) */
5464 		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
5465 
5466 		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
5467 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5468 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5469 
5470 		if (def != data)
5471 			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
5472 
5473 		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5474 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5475 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5476 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5477 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5478 		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
5479 
5480 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5481 		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5482 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5483 
5484 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5485 		if (adev->sdma.num_instances > 1) {
5486 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5487 			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5488 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5489 		}
5490 	} else {
5491 		/* Program RLC_CGCG_CGLS_CTRL */
5492 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5493 
5494 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5495 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5496 
5497 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5498 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5499 
5500 		if (def != data)
5501 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5502 
5503 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5504 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5505 
5506 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5507 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5508 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5509 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5510 
5511 		if (def != data)
5512 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5513 
5514 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5515 		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5516 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5517 
5518 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5519 		if (adev->sdma.num_instances > 1) {
5520 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5521 			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5522 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5523 		}
5524 	}
5525 }
5526 
5527 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5528 					    bool enable)
5529 {
5530 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5531 
5532 	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5533 
5534 	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5535 
5536 	gfx_v11_0_update_repeater_fgcg(adev, enable);
5537 
5538 	gfx_v11_0_update_sram_fgcg(adev, enable);
5539 
5540 	gfx_v11_0_update_perf_clk(adev, enable);
5541 
5542 	if (adev->cg_flags &
5543 	    (AMD_CG_SUPPORT_GFX_MGCG |
5544 	     AMD_CG_SUPPORT_GFX_CGLS |
5545 	     AMD_CG_SUPPORT_GFX_CGCG |
5546 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
5547 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
5548 		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
5549 
5550 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5551 
5552 	return 0;
5553 }
5554 
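/*
 * Update the VMID used for RLC SPM. GFXOFF is disabled around the MMIO
 * read-modify-write; for SRIOV one-VF mode the new value is additionally
 * emitted on the given gfx/compute ring.
 */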
5555 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
5556 {
5557 	u32 reg, pre_data, data;
5558 
5559 	amdgpu_gfx_off_ctrl(adev, false);
5560 	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
5561 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
5562 		pre_data = RREG32_NO_KIQ(reg);
5563 	else
5564 		pre_data = RREG32(reg);
5565 
5566 	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
5567 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5568 
5569 	if (pre_data != data) {
5570 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
5571 			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5572 		else
5573 			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
5574 	}
5575 	amdgpu_gfx_off_ctrl(adev, true);
5576 
5577 	if (ring
5578 		&& amdgpu_sriov_is_pp_one_vf(adev)
5579 		&& (pre_data != data)
5580 		&& ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
5581 			|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
5582 		amdgpu_ring_emit_wreg(ring, reg, data);
5583 	}
5584 }
5585 
5586 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5587 	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5588 	.set_safe_mode = gfx_v11_0_set_safe_mode,
5589 	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
5590 	.init = gfx_v11_0_rlc_init,
5591 	.get_csb_size = gfx_v11_0_get_csb_size,
5592 	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
5593 	.resume = gfx_v11_0_rlc_resume,
5594 	.stop = gfx_v11_0_rlc_stop,
5595 	.reset = gfx_v11_0_rlc_reset,
5596 	.start = gfx_v11_0_rlc_start,
5597 	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
5598 };
5599 
5600 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5601 {
5602 	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5603 
5604 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5605 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5606 	else
5607 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5608 
5609 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5610 
5611 	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5612 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5613 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5614 		case IP_VERSION(11, 0, 1):
5615 		case IP_VERSION(11, 0, 4):
5616 		case IP_VERSION(11, 5, 0):
5617 		case IP_VERSION(11, 5, 1):
5618 		case IP_VERSION(11, 5, 2):
5619 		case IP_VERSION(11, 5, 3):
5620 			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5621 			break;
5622 		default:
5623 			break;
5624 		}
5625 	}
5626 }
5627 
5628 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5629 {
5630 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5631 
5632 	gfx_v11_cntl_power_gating(adev, enable);
5633 
5634 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5635 }
5636 
5637 static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5638 					   enum amd_powergating_state state)
5639 {
5640 	struct amdgpu_device *adev = ip_block->adev;
5641 	bool enable = (state == AMD_PG_STATE_GATE);
5642 
5643 	if (amdgpu_sriov_vf(adev))
5644 		return 0;
5645 
5646 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5647 	case IP_VERSION(11, 0, 0):
5648 	case IP_VERSION(11, 0, 2):
5649 	case IP_VERSION(11, 0, 3):
5650 		amdgpu_gfx_off_ctrl(adev, enable);
5651 		break;
5652 	case IP_VERSION(11, 0, 1):
5653 	case IP_VERSION(11, 0, 4):
5654 	case IP_VERSION(11, 5, 0):
5655 	case IP_VERSION(11, 5, 1):
5656 	case IP_VERSION(11, 5, 2):
5657 	case IP_VERSION(11, 5, 3):
5658 		if (!enable)
5659 			amdgpu_gfx_off_ctrl(adev, false);
5660 
5661 		gfx_v11_cntl_pg(adev, enable);
5662 
5663 		if (enable)
5664 			amdgpu_gfx_off_ctrl(adev, true);
5665 
5666 		break;
5667 	default:
5668 		break;
5669 	}
5670 
5671 	return 0;
5672 }
5673 
5674 static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5675 					  enum amd_clockgating_state state)
5676 {
5677 	struct amdgpu_device *adev = ip_block->adev;
5678 
5679 	if (amdgpu_sriov_vf(adev))
5680 		return 0;
5681 
5682 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5683 	case IP_VERSION(11, 0, 0):
5684 	case IP_VERSION(11, 0, 1):
5685 	case IP_VERSION(11, 0, 2):
5686 	case IP_VERSION(11, 0, 3):
5687 	case IP_VERSION(11, 0, 4):
5688 	case IP_VERSION(11, 5, 0):
5689 	case IP_VERSION(11, 5, 1):
5690 	case IP_VERSION(11, 5, 2):
5691 	case IP_VERSION(11, 5, 3):
5692 		gfx_v11_0_update_gfx_clock_gating(adev,
5693 						  state == AMD_CG_STATE_GATE);
5694 		break;
5695 	default:
5696 		break;
5697 	}
5698 
5699 	return 0;
5700 }
5701 
5702 static void gfx_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5703 {
5704 	struct amdgpu_device *adev = ip_block->adev;
5705 	int data;
5706 
5707 	/* AMD_CG_SUPPORT_GFX_MGCG */
5708 	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5709 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5710 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5711 
5712 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
5713 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5714 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5715 
5716 	/* AMD_CG_SUPPORT_GFX_FGCG */
5717 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5718 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
5719 
5720 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
5721 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5722 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5723 
5724 	/* AMD_CG_SUPPORT_GFX_CGCG */
5725 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5726 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5727 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5728 
5729 	/* AMD_CG_SUPPORT_GFX_CGLS */
5730 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5731 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5732 
5733 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5734 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5735 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5736 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5737 
5738 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5739 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5740 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5741 }
5742 
5743 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5744 {
5745 	/* gfx11 uses a 32-bit rptr */
5746 	return *(uint32_t *)ring->rptr_cpu_addr;
5747 }
5748 
5749 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5750 {
5751 	struct amdgpu_device *adev = ring->adev;
5752 	u64 wptr;
5753 
5754 	/* XXX check if swapping is necessary on BE */
5755 	if (ring->use_doorbell) {
5756 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5757 	} else {
5758 		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5759 		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5760 	}
5761 
5762 	return wptr;
5763 }
5764 
5765 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5766 {
5767 	struct amdgpu_device *adev = ring->adev;
5768 
5769 	if (ring->use_doorbell) {
5770 		/* XXX check if swapping is necessary on BE */
5771 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5772 			     ring->wptr);
5773 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5774 	} else {
5775 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5776 			     lower_32_bits(ring->wptr));
5777 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5778 			     upper_32_bits(ring->wptr));
5779 	}
5780 }
5781 
5782 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5783 {
5784 	/* gfx11 hardware uses a 32-bit rptr */
5785 	return *(uint32_t *)ring->rptr_cpu_addr;
5786 }
5787 
5788 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5789 {
5790 	u64 wptr;
5791 
5792 	/* XXX check if swapping is necessary on BE */
5793 	if (ring->use_doorbell)
5794 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5795 	else
5796 		BUG();
5797 	return wptr;
5798 }
5799 
5800 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5801 {
5802 	struct amdgpu_device *adev = ring->adev;
5803 
5804 	/* XXX check if swapping is necessary on BE */
5805 	if (ring->use_doorbell) {
5806 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5807 			     ring->wptr);
5808 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5809 	} else {
5810 		BUG(); /* only DOORBELL method supported on gfx11 now */
5811 	}
5812 }
5813 
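/*
 * Emit an HDP flush: select the ref/mask bit for this ring in the NBIO
 * HDP flush request/done registers and wait until the flush completes.
 */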
5814 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5815 {
5816 	struct amdgpu_device *adev = ring->adev;
5817 	u32 ref_and_mask, reg_mem_engine;
5818 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5819 
5820 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5821 		switch (ring->me) {
5822 		case 1:
5823 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5824 			break;
5825 		case 2:
5826 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5827 			break;
5828 		default:
5829 			return;
5830 		}
5831 		reg_mem_engine = 0;
5832 	} else {
5833 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
5834 		reg_mem_engine = 1; /* pfp */
5835 	}
5836 
5837 	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5838 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5839 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5840 			       ref_and_mask, ref_and_mask, 0x20);
5841 }
5842 
5843 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5844 				       struct amdgpu_job *job,
5845 				       struct amdgpu_ib *ib,
5846 				       uint32_t flags)
5847 {
5848 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5849 	u32 header, control = 0;
5850 
5851 	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5852 
5853 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5854 
5855 	control |= ib->length_dw | (vmid << 24);
5856 
5857 	if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5858 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5859 
5860 		if (flags & AMDGPU_IB_PREEMPTED)
5861 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5862 
5863 		if (vmid)
5864 			gfx_v11_0_ring_emit_de_meta(ring,
5865 				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5866 	}
5867 
5868 	amdgpu_ring_write(ring, header);
5869 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5870 	amdgpu_ring_write(ring,
5871 #ifdef __BIG_ENDIAN
5872 		(2 << 0) |
5873 #endif
5874 		lower_32_bits(ib->gpu_addr));
5875 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5876 	amdgpu_ring_write(ring, control);
5877 }
5878 
5879 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5880 					   struct amdgpu_job *job,
5881 					   struct amdgpu_ib *ib,
5882 					   uint32_t flags)
5883 {
5884 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5885 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5886 
5887 	/* Currently there is a high likelihood of a wave ID mismatch
5888 	 * between ME and GDS, leading to a HW deadlock, because ME generates
5889 	 * different wave IDs than the GDS expects. This situation happens
5890 	 * randomly when at least 5 compute pipes use GDS ordered append.
5891 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5892 	 * Those are probably bugs somewhere else in the kernel driver.
5893 	 *
5894 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5895 	 * GDS to 0 for this ring (me/pipe).
5896 	 */
5897 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5898 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5899 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5900 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5901 	}
5902 
5903 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5904 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5905 	amdgpu_ring_write(ring,
5906 #ifdef __BIG_ENDIAN
5907 				(2 << 0) |
5908 #endif
5909 				lower_32_bits(ib->gpu_addr));
5910 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5911 	amdgpu_ring_write(ring, control);
5912 }
5913 
5914 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5915 				     u64 seq, unsigned flags)
5916 {
5917 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5918 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5919 
5920 	/* RELEASE_MEM - flush caches, send int */
5921 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5922 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5923 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5924 				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
5925 				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5926 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5927 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5928 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5929 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5930 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5931 
5932 	/*
5933 	 * the address should be qword aligned for a 64-bit write, and dword
5934 	 * aligned if we only send the low 32 bits (the high bits are discarded)
5935 	 */
5936 	if (write64bit)
5937 		BUG_ON(addr & 0x7);
5938 	else
5939 		BUG_ON(addr & 0x3);
5940 	amdgpu_ring_write(ring, lower_32_bits(addr));
5941 	amdgpu_ring_write(ring, upper_32_bits(addr));
5942 	amdgpu_ring_write(ring, lower_32_bits(seq));
5943 	amdgpu_ring_write(ring, upper_32_bits(seq));
5944 	amdgpu_ring_write(ring, 0);
5945 }
5946 
5947 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5948 {
5949 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5950 	uint32_t seq = ring->fence_drv.sync_seq;
5951 	uint64_t addr = ring->fence_drv.gpu_addr;
5952 
5953 	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5954 			       upper_32_bits(addr), seq, 0xffffffff, 4);
5955 }
5956 
5957 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5958 				   uint16_t pasid, uint32_t flush_type,
5959 				   bool all_hub, uint8_t dst_sel)
5960 {
5961 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5962 	amdgpu_ring_write(ring,
5963 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5964 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5965 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5966 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5967 }
5968 
5969 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5970 					 unsigned vmid, uint64_t pd_addr)
5971 {
5972 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5973 
5974 	/* compute doesn't have PFP */
5975 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5976 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5977 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5978 		amdgpu_ring_write(ring, 0x0);
5979 	}
5980 
5981 	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
5982 	 * changed in any way.
5983 	 */
5984 	ring->set_q_mode_offs = 0;
5985 	ring->set_q_mode_ptr = NULL;
5986 }
5987 
5988 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5989 					  u64 seq, unsigned int flags)
5990 {
5991 	struct amdgpu_device *adev = ring->adev;
5992 
5993 	/* we only allocate 32 bits for each seq wb address */
5994 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5995 
5996 	/* write fence seq to the "addr" */
5997 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5998 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5999 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
6000 	amdgpu_ring_write(ring, lower_32_bits(addr));
6001 	amdgpu_ring_write(ring, upper_32_bits(addr));
6002 	amdgpu_ring_write(ring, lower_32_bits(seq));
6003 
6004 	if (flags & AMDGPU_FENCE_FLAG_INT) {
6005 		/* set register to trigger INT */
6006 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6007 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
6008 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
6009 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
6010 		amdgpu_ring_write(ring, 0);
6011 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
6012 	}
6013 }
6014 
6015 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
6016 					 uint32_t flags)
6017 {
6018 	uint32_t dw2 = 0;
6019 
6020 	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
6021 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
6022 		/* set load_global_config & load_global_uconfig */
6023 		dw2 |= 0x8001;
6024 		/* set load_cs_sh_regs */
6025 		dw2 |= 0x01000000;
6026 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
6027 		dw2 |= 0x10002;
6028 	}
6029 
6030 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6031 	amdgpu_ring_write(ring, dw2);
6032 	amdgpu_ring_write(ring, 0);
6033 }
6034 
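/*
 * Emit a COND_EXEC packet whose "number of DWs to skip" value is patched
 * in later; returns the ring offset of that patch location.
 */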
6035 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
6036 						   uint64_t addr)
6037 {
6038 	unsigned ret;
6039 
6040 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
6041 	amdgpu_ring_write(ring, lower_32_bits(addr));
6042 	amdgpu_ring_write(ring, upper_32_bits(addr));
6043 	/* discard following DWs if *cond_exec_gpu_addr==0 */
6044 	amdgpu_ring_write(ring, 0);
6045 	ret = ring->wptr & ring->buf_mask;
6046 	/* patch dummy value later */
6047 	amdgpu_ring_write(ring, 0);
6048 
6049 	return ret;
6050 }
6051 
6052 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
6053 					   u64 shadow_va, u64 csa_va,
6054 					   u64 gds_va, bool init_shadow,
6055 					   int vmid)
6056 {
6057 	struct amdgpu_device *adev = ring->adev;
6058 	unsigned int offs, end;
6059 
6060 	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
6061 		return;
6062 
6063 	/*
6064 	 * The logic here isn't easy to understand because we need to keep state
6065 	 * across multiple executions of the function as well as between the
6066 	 * CPU and GPU. The general idea is that the newly written GPU command
6067 	 * has a condition on the previous one and is only executed if really
6068 	 * necessary.
6069 	 */
6070 
6071 	/*
6072 	 * The dw in the NOP controls whether the next SET_Q_MODE packet is
6073 	 * executed or not. Reserve 64 bits just to be on the safe side.
6074 	 */
6075 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
6076 	offs = ring->wptr & ring->buf_mask;
6077 
6078 	/*
6079 	 * We start with skipping the prefix SET_Q_MODE and always executing
6080 	 * the postfix SET_Q_MODE packet. This is changed below with a
6081 	 * WRITE_DATA command once the postfix has executed.
6082 	 */
6083 	amdgpu_ring_write(ring, shadow_va ? 1 : 0);
6084 	amdgpu_ring_write(ring, 0);
6085 
6086 	if (ring->set_q_mode_offs) {
6087 		uint64_t addr;
6088 
6089 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
6090 		addr += ring->set_q_mode_offs << 2;
6091 		end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr);
6092 	}
6093 
6094 	/*
6095 	 * When the postfix SET_Q_MODE packet executes we need to make sure that the
6096 	 * next prefix SET_Q_MODE packet executes as well.
6097 	 */
6098 	if (!shadow_va) {
6099 		uint64_t addr;
6100 
6101 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
6102 		addr += offs << 2;
6103 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6104 		amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
6105 		amdgpu_ring_write(ring, lower_32_bits(addr));
6106 		amdgpu_ring_write(ring, upper_32_bits(addr));
6107 		amdgpu_ring_write(ring, 0x1);
6108 	}
6109 
6110 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
6111 	amdgpu_ring_write(ring, lower_32_bits(shadow_va));
6112 	amdgpu_ring_write(ring, upper_32_bits(shadow_va));
6113 	amdgpu_ring_write(ring, lower_32_bits(gds_va));
6114 	amdgpu_ring_write(ring, upper_32_bits(gds_va));
6115 	amdgpu_ring_write(ring, lower_32_bits(csa_va));
6116 	amdgpu_ring_write(ring, upper_32_bits(csa_va));
6117 	amdgpu_ring_write(ring, shadow_va ?
6118 			  PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
6119 	amdgpu_ring_write(ring, init_shadow ?
6120 			  PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
6121 
6122 	if (ring->set_q_mode_offs)
6123 		amdgpu_ring_patch_cond_exec(ring, end);
6124 
6125 	if (shadow_va) {
6126 		uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;
6127 
6128 		/*
6129 		 * If the tokens match try to skip the last postfix SET_Q_MODE
6130 		 * packet to avoid saving/restoring the state all the time.
6131 		 */
6132 		if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
6133 			*ring->set_q_mode_ptr = 0;
6134 
6135 		ring->set_q_mode_token = token;
6136 	} else {
6137 		ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs];
6138 	}
6139 
6140 	ring->set_q_mode_offs = offs;
6141 }
6142 
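/*
 * Preempt the IBs on a gfx ring: disable the preemption condition exec,
 * have the KIQ request a preemption via an unmap with
 * PREEMPT_QUEUES_NO_UNMAP and a trailing fence, poll for that fence to
 * signal, then re-enable the condition exec.
 */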
6143 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
6144 {
6145 	int i, r = 0;
6146 	struct amdgpu_device *adev = ring->adev;
6147 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
6148 	struct amdgpu_ring *kiq_ring = &kiq->ring;
6149 	unsigned long flags;
6150 
6151 	if (adev->enable_mes)
6152 		return -EINVAL;
6153 
6154 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
6155 		return -EINVAL;
6156 
6157 	spin_lock_irqsave(&kiq->ring_lock, flags);
6158 
6159 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
6160 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
6161 		return -ENOMEM;
6162 	}
6163 
6164 	/* assert preemption condition */
6165 	amdgpu_ring_set_preempt_cond_exec(ring, false);
6166 
6167 	/* assert IB preemption, emit the trailing fence */
6168 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
6169 				   ring->trail_fence_gpu_addr,
6170 				   ++ring->trail_seq);
6171 	amdgpu_ring_commit(kiq_ring);
6172 
6173 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
6174 
6175 	/* poll the trailing fence */
6176 	for (i = 0; i < adev->usec_timeout; i++) {
6177 		if (ring->trail_seq ==
6178 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
6179 			break;
6180 		udelay(1);
6181 	}
6182 
6183 	if (i >= adev->usec_timeout) {
6184 		r = -EINVAL;
6185 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
6186 	}
6187 
6188 	/* deassert preemption condition */
6189 	amdgpu_ring_set_preempt_cond_exec(ring, true);
6190 	return r;
6191 }
6192 
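/*
 * Emit the DE metadata payload (including the GDS backup address) via
 * WRITE_DATA; on resume the payload previously saved in the CSA is
 * replayed instead of a freshly built one.
 */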
6193 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
6194 {
6195 	struct amdgpu_device *adev = ring->adev;
6196 	struct v10_de_ib_state de_payload = {0};
6197 	uint64_t offset, gds_addr, de_payload_gpu_addr;
6198 	void *de_payload_cpu_addr;
6199 	int cnt;
6200 
6201 	offset = offsetof(struct v10_gfx_meta_data, de_payload);
6202 	de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
6203 	de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
6204 
6205 	gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
6206 			 AMDGPU_CSA_SIZE - adev->gds.gds_size,
6207 			 PAGE_SIZE);
6208 
6209 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
6210 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
6211 
6212 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
6213 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
6214 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
6215 				 WRITE_DATA_DST_SEL(8) |
6216 				 WR_CONFIRM) |
6217 				 WRITE_DATA_CACHE_POLICY(0));
6218 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
6219 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
6220 
6221 	if (resume)
6222 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
6223 					   sizeof(de_payload) >> 2);
6224 	else
6225 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
6226 					   sizeof(de_payload) >> 2);
6227 }
6228 
6229 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
6230 				    bool secure)
6231 {
6232 	uint32_t v = secure ? FRAME_TMZ : 0;
6233 
6234 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
6235 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
6236 }
6237 
6238 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6239 				     uint32_t reg_val_offs)
6240 {
6241 	struct amdgpu_device *adev = ring->adev;
6242 
6243 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6244 	amdgpu_ring_write(ring, 0 |	/* src: register*/
6245 				(5 << 8) |	/* dst: memory */
6246 				(1 << 20));	/* write confirm */
6247 	amdgpu_ring_write(ring, reg);
6248 	amdgpu_ring_write(ring, 0);
6249 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6250 				reg_val_offs * 4));
6251 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6252 				reg_val_offs * 4));
6253 }
6254 
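/*
 * Emit a register write via WRITE_DATA.  The engine selection depends on
 * the ring type: gfx rings route the write through the PFP with write
 * confirmation, the KIQ uses the no-increment-address encoding, and
 * compute rings fall back to a plain confirmed write.
 */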
6255 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6256 				   uint32_t val)
6257 {
6258 	uint32_t cmd = 0;
6259 
6260 	switch (ring->funcs->type) {
6261 	case AMDGPU_RING_TYPE_GFX:
6262 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6263 		break;
6264 	case AMDGPU_RING_TYPE_KIQ:
6265 		cmd = (1 << 16); /* no inc addr */
6266 		break;
6267 	default:
6268 		cmd = WR_CONFIRM;
6269 		break;
6270 	}
6271 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6272 	amdgpu_ring_write(ring, cmd);
6273 	amdgpu_ring_write(ring, reg);
6274 	amdgpu_ring_write(ring, 0);
6275 	amdgpu_ring_write(ring, val);
6276 }
6277 
6278 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
6279 					uint32_t val, uint32_t mask)
6280 {
6281 	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
6282 }
6283 
6284 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
6285 						   uint32_t reg0, uint32_t reg1,
6286 						   uint32_t ref, uint32_t mask)
6287 {
6288 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6289 
6290 	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
6291 			       ref, mask, 0x20);
6292 }
6293 
6294 static void
6295 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6296 				      uint32_t me, uint32_t pipe,
6297 				      enum amdgpu_interrupt_state state)
6298 {
6299 	uint32_t cp_int_cntl, cp_int_cntl_reg;
6300 
6301 	if (!me) {
6302 		switch (pipe) {
6303 		case 0:
6304 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
6305 			break;
6306 		case 1:
6307 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
6308 			break;
6309 		default:
6310 			DRM_DEBUG("invalid pipe %d\n", pipe);
6311 			return;
6312 		}
6313 	} else {
6314 		DRM_DEBUG("invalid me %d\n", me);
6315 		return;
6316 	}
6317 
6318 	switch (state) {
6319 	case AMDGPU_IRQ_STATE_DISABLE:
6320 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6321 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6322 					    TIME_STAMP_INT_ENABLE, 0);
6323 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6324 					    GENERIC0_INT_ENABLE, 0);
6325 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6326 		break;
6327 	case AMDGPU_IRQ_STATE_ENABLE:
6328 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6329 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6330 					    TIME_STAMP_INT_ENABLE, 1);
6331 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6332 					    GENERIC0_INT_ENABLE, 1);
6333 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6334 		break;
6335 	default:
6336 		break;
6337 	}
6338 }
6339 
6340 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6341 						     int me, int pipe,
6342 						     enum amdgpu_interrupt_state state)
6343 {
6344 	u32 mec_int_cntl, mec_int_cntl_reg;
6345 
6346 	/*
6347 	 * amdgpu controls only the first MEC. That's why this function only
6348 	 * handles the setting of interrupts for this specific MEC. All other
6349 	 * pipes' interrupts are set by amdkfd.
6350 	 */
6351 
6352 	if (me == 1) {
6353 		switch (pipe) {
6354 		case 0:
6355 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6356 			break;
6357 		case 1:
6358 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
6359 			break;
6360 		case 2:
6361 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
6362 			break;
6363 		case 3:
6364 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
6365 			break;
6366 		default:
6367 			DRM_DEBUG("invalid pipe %d\n", pipe);
6368 			return;
6369 		}
6370 	} else {
6371 		DRM_DEBUG("invalid me %d\n", me);
6372 		return;
6373 	}
6374 
6375 	switch (state) {
6376 	case AMDGPU_IRQ_STATE_DISABLE:
6377 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6378 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6379 					     TIME_STAMP_INT_ENABLE, 0);
6380 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6381 					     GENERIC0_INT_ENABLE, 0);
6382 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6383 		break;
6384 	case AMDGPU_IRQ_STATE_ENABLE:
6385 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6386 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6387 					     TIME_STAMP_INT_ENABLE, 1);
6388 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6389 					     GENERIC0_INT_ENABLE, 1);
6390 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6391 		break;
6392 	default:
6393 		break;
6394 	}
6395 }
6396 
6397 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6398 					    struct amdgpu_irq_src *src,
6399 					    unsigned type,
6400 					    enum amdgpu_interrupt_state state)
6401 {
6402 	switch (type) {
6403 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6404 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
6405 		break;
6406 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
6407 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
6408 		break;
6409 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6410 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6411 		break;
6412 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6413 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6414 		break;
6415 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6416 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6417 		break;
6418 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6419 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6420 		break;
6421 	default:
6422 		break;
6423 	}
6424 	return 0;
6425 }
6426 
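/*
 * Process an EOP (end of pipe) interrupt.  With MES enabled, a non-zero
 * doorbell offset identifies a user-queue fence driver in the userq
 * xarray and its fences are processed directly; otherwise the legacy
 * path decodes me/pipe/queue from the ring_id and signals the fences of
 * the matching kernel gfx or compute ring.
 */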
6427 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
6428 			     struct amdgpu_irq_src *source,
6429 			     struct amdgpu_iv_entry *entry)
6430 {
6431 	u32 doorbell_offset = entry->src_data[0];
6432 	u8 me_id, pipe_id, queue_id;
6433 	struct amdgpu_ring *ring;
6434 	int i;
6435 
6436 	DRM_DEBUG("IH: CP EOP\n");
6437 
6438 	if (adev->enable_mes && doorbell_offset) {
6439 		struct amdgpu_userq_fence_driver *fence_drv = NULL;
6440 		struct xarray *xa = &adev->userq_xa;
6441 		unsigned long flags;
6442 
6443 		xa_lock_irqsave(xa, flags);
6444 		fence_drv = xa_load(xa, doorbell_offset);
6445 		if (fence_drv)
6446 			amdgpu_userq_fence_driver_process(fence_drv);
6447 		xa_unlock_irqrestore(xa, flags);
6448 	} else {
6449 		me_id = (entry->ring_id & 0x0c) >> 2;
6450 		pipe_id = (entry->ring_id & 0x03) >> 0;
6451 		queue_id = (entry->ring_id & 0x70) >> 4;
6452 
6453 		switch (me_id) {
6454 		case 0:
6455 			if (pipe_id == 0)
6456 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6457 			else
6458 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
6459 			break;
6460 		case 1:
6461 		case 2:
6462 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6463 				ring = &adev->gfx.compute_ring[i];
6464 				/* Per-queue interrupt is supported for MEC starting from VI.
6465 				 * The interrupt can only be enabled/disabled per pipe instead
6466 				 * of per queue.
6467 				 */
6468 				if ((ring->me == me_id) &&
6469 				    (ring->pipe == pipe_id) &&
6470 				    (ring->queue == queue_id))
6471 					amdgpu_fence_process(ring);
6472 			}
6473 			break;
6474 		}
6475 	}
6476 
6477 	return 0;
6478 }
6479 
6480 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6481 					      struct amdgpu_irq_src *source,
6482 					      unsigned int type,
6483 					      enum amdgpu_interrupt_state state)
6484 {
6485 	u32 cp_int_cntl_reg, cp_int_cntl;
6486 	int i, j;
6487 
6488 	switch (state) {
6489 	case AMDGPU_IRQ_STATE_DISABLE:
6490 	case AMDGPU_IRQ_STATE_ENABLE:
6491 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6492 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6493 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6494 
6495 				if (cp_int_cntl_reg) {
6496 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6497 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6498 								    PRIV_REG_INT_ENABLE,
6499 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6500 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6501 				}
6502 			}
6503 		}
6504 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6505 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6506 				/* MECs start at 1 */
6507 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6508 
6509 				if (cp_int_cntl_reg) {
6510 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6511 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6512 								    PRIV_REG_INT_ENABLE,
6513 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6514 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6515 				}
6516 			}
6517 		}
6518 		break;
6519 	default:
6520 		break;
6521 	}
6522 
6523 	return 0;
6524 }
6525 
6526 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6527 					    struct amdgpu_irq_src *source,
6528 					    unsigned type,
6529 					    enum amdgpu_interrupt_state state)
6530 {
6531 	u32 cp_int_cntl_reg, cp_int_cntl;
6532 	int i, j;
6533 
6534 	switch (state) {
6535 	case AMDGPU_IRQ_STATE_DISABLE:
6536 	case AMDGPU_IRQ_STATE_ENABLE:
6537 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6538 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6539 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6540 
6541 				if (cp_int_cntl_reg) {
6542 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6543 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6544 								    OPCODE_ERROR_INT_ENABLE,
6545 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6546 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6547 				}
6548 			}
6549 		}
6550 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6551 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6552 				/* MECs start at 1 */
6553 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6554 
6555 				if (cp_int_cntl_reg) {
6556 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6557 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6558 								    OPCODE_ERROR_INT_ENABLE,
6559 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6560 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6561 				}
6562 			}
6563 		}
6564 		break;
6565 	default:
6566 		break;
6567 	}
6568 	return 0;
6569 }
6570 
6571 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6572 					       struct amdgpu_irq_src *source,
6573 					       unsigned int type,
6574 					       enum amdgpu_interrupt_state state)
6575 {
6576 	u32 cp_int_cntl_reg, cp_int_cntl;
6577 	int i, j;
6578 
6579 	switch (state) {
6580 	case AMDGPU_IRQ_STATE_DISABLE:
6581 	case AMDGPU_IRQ_STATE_ENABLE:
6582 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6583 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6584 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6585 
6586 				if (cp_int_cntl_reg) {
6587 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6588 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6589 								    PRIV_INSTR_INT_ENABLE,
6590 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6591 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6592 				}
6593 			}
6594 		}
6595 		break;
6596 	default:
6597 		break;
6598 	}
6599 
6600 	return 0;
6601 }
6602 
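/*
 * Common handler for privileged register/instruction and illegal opcode
 * faults: decode the faulting me/pipe/queue from the ring_id and report
 * a scheduler fault on the matching kernel ring, which kicks off the
 * scheduler's timeout/recovery handling for the offending job.
 */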
6603 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
6604 					struct amdgpu_iv_entry *entry)
6605 {
6606 	u8 me_id, pipe_id, queue_id;
6607 	struct amdgpu_ring *ring;
6608 	int i;
6609 
6610 	me_id = (entry->ring_id & 0x0c) >> 2;
6611 	pipe_id = (entry->ring_id & 0x03) >> 0;
6612 	queue_id = (entry->ring_id & 0x70) >> 4;
6613 
6614 	if (!adev->gfx.disable_kq) {
6615 		switch (me_id) {
6616 		case 0:
6617 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
6618 				ring = &adev->gfx.gfx_ring[i];
6619 				if (ring->me == me_id && ring->pipe == pipe_id &&
6620 				    ring->queue == queue_id)
6621 					drm_sched_fault(&ring->sched);
6622 			}
6623 			break;
6624 		case 1:
6625 		case 2:
6626 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6627 				ring = &adev->gfx.compute_ring[i];
6628 				if (ring->me == me_id && ring->pipe == pipe_id &&
6629 				    ring->queue == queue_id)
6630 					drm_sched_fault(&ring->sched);
6631 			}
6632 			break;
6633 		default:
6634 			BUG();
6635 			break;
6636 		}
6637 	}
6638 }
6639 
6640 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
6641 				  struct amdgpu_irq_src *source,
6642 				  struct amdgpu_iv_entry *entry)
6643 {
6644 	DRM_ERROR("Illegal register access in command stream\n");
6645 	gfx_v11_0_handle_priv_fault(adev, entry);
6646 	return 0;
6647 }
6648 
6649 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
6650 				struct amdgpu_irq_src *source,
6651 				struct amdgpu_iv_entry *entry)
6652 {
	DRM_ERROR("Illegal opcode in command stream\n");
6654 	gfx_v11_0_handle_priv_fault(adev, entry);
6655 	return 0;
6656 }
6657 
6658 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
6659 				   struct amdgpu_irq_src *source,
6660 				   struct amdgpu_iv_entry *entry)
6661 {
6662 	DRM_ERROR("Illegal instruction in command stream\n");
6663 	gfx_v11_0_handle_priv_fault(adev, entry);
6664 	return 0;
6665 }
6666 
6667 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
6668 				  struct amdgpu_irq_src *source,
6669 				  struct amdgpu_iv_entry *entry)
6670 {
6671 	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
6672 		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
6673 
6674 	return 0;
6675 }
6676 
6677 #if 0
6678 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6679 					     struct amdgpu_irq_src *src,
6680 					     unsigned int type,
6681 					     enum amdgpu_interrupt_state state)
6682 {
6683 	uint32_t tmp, target;
6684 	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
6685 
6686 	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6687 	target += ring->pipe;
6688 
6689 	switch (type) {
6690 	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
6691 		if (state == AMDGPU_IRQ_STATE_DISABLE) {
6692 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6693 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6694 					    GENERIC2_INT_ENABLE, 0);
6695 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6696 
6697 			tmp = RREG32_SOC15_IP(GC, target);
6698 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6699 					    GENERIC2_INT_ENABLE, 0);
6700 			WREG32_SOC15_IP(GC, target, tmp);
6701 		} else {
6702 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6703 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6704 					    GENERIC2_INT_ENABLE, 1);
6705 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6706 
6707 			tmp = RREG32_SOC15_IP(GC, target);
6708 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6709 					    GENERIC2_INT_ENABLE, 1);
6710 			WREG32_SOC15_IP(GC, target, tmp);
6711 		}
6712 		break;
6713 	default:
6714 		BUG(); /* kiq only support GENERIC2_INT now */
6715 		break;
6716 	}
6717 	return 0;
6718 }
6719 #endif
6720 
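/*
 * Emit a full memory sync: an ACQUIRE_MEM packet covering the entire
 * address range whose GCR_CNTL requests a GL2 and GLM writeback plus
 * invalidate, and invalidation of the GL1, GLV, GLK and GLI caches.
 */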
6721 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
6722 {
6723 	const unsigned int gcr_cntl =
6724 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
6725 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
6726 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
6727 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
6728 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
6729 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
6730 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
6731 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
6732 
6733 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6734 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
6735 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
6736 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6737 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6738 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6739 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6740 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6741 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
6742 }
6743 
6744 static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
6745 {
	/* Disable the pipe reset until the CP firmware fully supports it. */
	dev_warn_once(adev->dev, "The CP firmware does not support pipe reset yet.\n");
6748 	return false;
6749 }
6750 
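/*
 * Reset a gfx pipe while the RLC is in safe mode: pulse the PFP and ME
 * reset bits for the ring's pipe in CP_ME_CNTL, then compare the ME
 * instruction pointer against the RS64 firmware start address to judge
 * whether the pipe restarted from the beginning of its firmware.
 */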
6752 static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
6753 {
6754 	struct amdgpu_device *adev = ring->adev;
6755 	uint32_t reset_pipe = 0, clean_pipe = 0;
6756 	int r;
6757 
6758 	if (!gfx_v11_pipe_reset_support(adev))
6759 		return -EOPNOTSUPP;
6760 
6761 	gfx_v11_0_set_safe_mode(adev, 0);
6762 	mutex_lock(&adev->srbm_mutex);
6763 	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6764 
6765 	switch (ring->pipe) {
6766 	case 0:
6767 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6768 					   PFP_PIPE0_RESET, 1);
6769 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6770 					   ME_PIPE0_RESET, 1);
6771 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6772 					   PFP_PIPE0_RESET, 0);
6773 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6774 					   ME_PIPE0_RESET, 0);
6775 		break;
6776 	case 1:
6777 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6778 					   PFP_PIPE1_RESET, 1);
6779 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
6780 					   ME_PIPE1_RESET, 1);
6781 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6782 					   PFP_PIPE1_RESET, 0);
6783 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
6784 					   ME_PIPE1_RESET, 0);
6785 		break;
6786 	default:
6787 		break;
6788 	}
6789 
6790 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
6791 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
6792 
6793 	r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
6794 						RS64_FW_UC_START_ADDR_LO;
6795 	soc21_grbm_select(adev, 0, 0, 0, 0);
6796 	mutex_unlock(&adev->srbm_mutex);
6797 	gfx_v11_0_unset_safe_mode(adev, 0);
6798 
	dev_info(adev->dev, "ring %s pipe reset to the ME firmware start PC %s\n", ring->name,
		 r == 0 ? "succeeded" : "failed");
	/* FIXME: Sometimes the driver can't cache the ME firmware start PC
	 * correctly, so the pipe reset status relies on the later gfx ring
	 * test result.
	 */
6804 	return 0;
6805 }
6806 
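/*
 * Reset a kernel gfx queue after a job timeout.  The queue is first
 * reset through the MES; if that fails, a full gfx pipe reset is
 * attempted as a fallback.  The MQD is then reinitialized and the queue
 * remapped before the common ring-reset helper finishes the recovery.
 */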
6807 static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
6808 			       unsigned int vmid,
6809 			       struct amdgpu_fence *timedout_fence)
6810 {
6811 	struct amdgpu_device *adev = ring->adev;
6812 	int r;
6813 
6814 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
6815 
6816 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
	if (r) {
		dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
6820 		r = gfx_v11_reset_gfx_pipe(ring);
6821 		if (r)
6822 			return r;
6823 	}
6824 
6825 	r = gfx_v11_0_kgq_init_queue(ring, true);
6826 	if (r) {
6827 		dev_err(adev->dev, "failed to init kgq\n");
6828 		return r;
6829 	}
6830 
6831 	r = amdgpu_mes_map_legacy_queue(adev, ring);
6832 	if (r) {
6833 		dev_err(adev->dev, "failed to remap kgq\n");
6834 		return r;
6835 	}
6836 
6837 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
6838 }
6839 
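/*
 * Reset a compute pipe while the RLC is in safe mode.  RS64 firmware
 * uses the per-pipe MEC_PIPEn_RESET bits in CP_MEC_RS64_CNTL; older
 * (non-RS64) firmware uses the MEC_MEn_PIPEm_RESET bits in CP_MEC_CNTL
 * instead.  As with the gfx pipe reset, the MEC instruction pointer is
 * read back (relative to the RS64 firmware start address when
 * applicable) to estimate whether the pipe restarted from its firmware.
 */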
6840 static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
{
6843 	struct amdgpu_device *adev = ring->adev;
6844 	uint32_t reset_pipe = 0, clean_pipe = 0;
6845 	int r;
6846 
6847 	if (!gfx_v11_pipe_reset_support(adev))
6848 		return -EOPNOTSUPP;
6849 
6850 	gfx_v11_0_set_safe_mode(adev, 0);
6851 	mutex_lock(&adev->srbm_mutex);
6852 	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6853 
6854 	reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
6855 	clean_pipe = reset_pipe;
6856 
	if (adev->gfx.rs64_enable) {
6859 		switch (ring->pipe) {
6860 		case 0:
6861 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6862 						   MEC_PIPE0_RESET, 1);
6863 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6864 						   MEC_PIPE0_RESET, 0);
6865 			break;
6866 		case 1:
6867 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6868 						   MEC_PIPE1_RESET, 1);
6869 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6870 						   MEC_PIPE1_RESET, 0);
6871 			break;
6872 		case 2:
6873 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6874 						   MEC_PIPE2_RESET, 1);
6875 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6876 						   MEC_PIPE2_RESET, 0);
6877 			break;
6878 		case 3:
6879 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
6880 						   MEC_PIPE3_RESET, 1);
6881 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
6882 						   MEC_PIPE3_RESET, 0);
6883 			break;
6884 		default:
6885 			break;
6886 		}
6887 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
6888 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
6889 		r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
6890 					RS64_FW_UC_START_ADDR_LO;
6891 	} else {
6892 		if (ring->me == 1) {
6893 			switch (ring->pipe) {
6894 			case 0:
6895 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6896 							   MEC_ME1_PIPE0_RESET, 1);
6897 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6898 							   MEC_ME1_PIPE0_RESET, 0);
6899 				break;
6900 			case 1:
6901 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6902 							   MEC_ME1_PIPE1_RESET, 1);
6903 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6904 							   MEC_ME1_PIPE1_RESET, 0);
6905 				break;
6906 			case 2:
6907 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6908 							   MEC_ME1_PIPE2_RESET, 1);
6909 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6910 							   MEC_ME1_PIPE2_RESET, 0);
6911 				break;
6912 			case 3:
6913 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6914 							   MEC_ME1_PIPE3_RESET, 1);
6915 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6916 							   MEC_ME1_PIPE3_RESET, 0);
6917 				break;
6918 			default:
6919 				break;
6920 			}
6921 			/* mec1 fw pc: CP_MEC1_INSTR_PNTR */
6922 		} else {
6923 			switch (ring->pipe) {
6924 			case 0:
6925 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6926 							   MEC_ME2_PIPE0_RESET, 1);
6927 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6928 							   MEC_ME2_PIPE0_RESET, 0);
6929 				break;
6930 			case 1:
6931 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6932 							   MEC_ME2_PIPE1_RESET, 1);
6933 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6934 							   MEC_ME2_PIPE1_RESET, 0);
6935 				break;
6936 			case 2:
6937 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6938 							   MEC_ME2_PIPE2_RESET, 1);
6939 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6940 							   MEC_ME2_PIPE2_RESET, 0);
6941 				break;
6942 			case 3:
6943 				reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
6944 							   MEC_ME2_PIPE3_RESET, 1);
6945 				clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
6946 							   MEC_ME2_PIPE3_RESET, 0);
6947 				break;
6948 			default:
6949 				break;
6950 			}
			/* mec2 fw pc: CP_MEC2_INSTR_PNTR */
6952 		}
6953 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
6954 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
6955 		r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
6956 	}
6957 
6958 	soc21_grbm_select(adev, 0, 0, 0, 0);
6959 	mutex_unlock(&adev->srbm_mutex);
6960 	gfx_v11_0_unset_safe_mode(adev, 0);
6961 
	dev_info(adev->dev, "ring %s pipe reset to the MEC firmware start PC %s\n", ring->name,
		 r == 0 ? "succeeded" : "failed");
	/* FIXME: Sometimes the driver can't cache the MEC firmware start PC
	 * correctly, so the pipe reset status relies on the later compute
	 * ring test result.
	 */
6967 	return 0;
6968 }
6969 
6970 static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
6971 			       unsigned int vmid,
6972 			       struct amdgpu_fence *timedout_fence)
6973 {
6974 	struct amdgpu_device *adev = ring->adev;
6975 	int r = 0;
6976 
6977 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
6978 
6979 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
6980 	if (r) {
		dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
6982 		r = gfx_v11_0_reset_compute_pipe(ring);
6983 		if (r)
6984 			return r;
6985 	}
6986 
6987 	r = gfx_v11_0_kcq_init_queue(ring, true);
6988 	if (r) {
		dev_err(adev->dev, "failed to init kcq\n");
6990 		return r;
6991 	}
6992 	r = amdgpu_mes_map_legacy_queue(adev, ring);
6993 	if (r) {
6994 		dev_err(adev->dev, "failed to remap kcq\n");
6995 		return r;
6996 	}
6997 
6998 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
6999 }
7000 
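/*
 * Print the register state captured by gfx_v11_ip_dump() to the given
 * printer: first the core GC registers, then the per-queue compute and
 * gfx registers for every mec/pipe/queue and me/pipe/queue instance.
 */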
7001 static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
7002 {
7003 	struct amdgpu_device *adev = ip_block->adev;
7004 	uint32_t i, j, k, reg, index = 0;
7005 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
7006 
7007 	if (!adev->gfx.ip_dump_core)
7008 		return;
7009 
7010 	for (i = 0; i < reg_count; i++)
7011 		drm_printf(p, "%-50s \t 0x%08x\n",
7012 			   gc_reg_list_11_0[i].reg_name,
7013 			   adev->gfx.ip_dump_core[i]);
7014 
7015 	/* print compute queue registers for all instances */
7016 	if (!adev->gfx.ip_dump_compute_queues)
7017 		return;
7018 
7019 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
7020 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
7021 		   adev->gfx.mec.num_mec,
7022 		   adev->gfx.mec.num_pipe_per_mec,
7023 		   adev->gfx.mec.num_queue_per_pipe);
7024 
7025 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7026 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7027 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7028 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
7029 				for (reg = 0; reg < reg_count; reg++) {
7030 					if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
7031 						drm_printf(p, "%-50s \t 0x%08x\n",
7032 							   "regCP_MEC_ME2_HEADER_DUMP",
7033 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7034 					else
7035 						drm_printf(p, "%-50s \t 0x%08x\n",
7036 							   gc_cp_reg_list_11[reg].reg_name,
7037 							   adev->gfx.ip_dump_compute_queues[index + reg]);
7038 				}
7039 				index += reg_count;
7040 			}
7041 		}
7042 	}
7043 
7044 	/* print gfx queue registers for all instances */
7045 	if (!adev->gfx.ip_dump_gfx_queues)
7046 		return;
7047 
7048 	index = 0;
7049 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
7050 	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
7051 		   adev->gfx.me.num_me,
7052 		   adev->gfx.me.num_pipe_per_me,
7053 		   adev->gfx.me.num_queue_per_pipe);
7054 
7055 	for (i = 0; i < adev->gfx.me.num_me; i++) {
7056 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
7057 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
7058 				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
7059 				for (reg = 0; reg < reg_count; reg++) {
7060 					drm_printf(p, "%-50s \t 0x%08x\n",
7061 						   gc_gfx_queue_reg_list_11[reg].reg_name,
7062 						   adev->gfx.ip_dump_gfx_queues[index + reg]);
7063 				}
7064 				index += reg_count;
7065 			}
7066 		}
7067 	}
7068 }
7069 
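/*
 * Capture the GC register state for a debug dump.  GFXOFF is disabled
 * around the MMIO reads so the registers stay accessible, and srbm_mutex
 * is held while GRBM-selecting each individual queue.  Compute queues
 * start at ME1 since ME0 is the gfx engine.
 */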
7070 static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
7071 {
7072 	struct amdgpu_device *adev = ip_block->adev;
7073 	uint32_t i, j, k, reg, index = 0;
7074 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
7075 
7076 	if (!adev->gfx.ip_dump_core)
7077 		return;
7078 
7079 	amdgpu_gfx_off_ctrl(adev, false);
7080 	for (i = 0; i < reg_count; i++)
7081 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i]));
7082 	amdgpu_gfx_off_ctrl(adev, true);
7083 
7084 	/* dump compute queue registers for all instances */
7085 	if (!adev->gfx.ip_dump_compute_queues)
7086 		return;
7087 
7088 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
7089 	amdgpu_gfx_off_ctrl(adev, false);
7090 	mutex_lock(&adev->srbm_mutex);
7091 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
7092 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
7093 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
7094 				/* ME0 is for GFX so start from 1 for CP */
7095 				soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
7096 				for (reg = 0; reg < reg_count; reg++) {
7097 					if (i &&
7098 					    gc_cp_reg_list_11[reg].reg_offset ==
7099 						    regCP_MEC_ME1_HEADER_DUMP)
7100 						adev->gfx.ip_dump_compute_queues[index + reg] =
7101 							RREG32(SOC15_REG_OFFSET(GC, 0,
7102 							       regCP_MEC_ME2_HEADER_DUMP));
7103 					else
7104 						adev->gfx.ip_dump_compute_queues[index + reg] =
7105 							RREG32(SOC15_REG_ENTRY_OFFSET(
7106 								       gc_cp_reg_list_11[reg]));
7107 				}
7108 				index += reg_count;
7109 			}
7110 		}
7111 	}
7112 	soc21_grbm_select(adev, 0, 0, 0, 0);
7113 	mutex_unlock(&adev->srbm_mutex);
7114 	amdgpu_gfx_off_ctrl(adev, true);
7115 
7116 	/* dump gfx queue registers for all instances */
7117 	if (!adev->gfx.ip_dump_gfx_queues)
7118 		return;
7119 
7120 	index = 0;
7121 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
7122 	amdgpu_gfx_off_ctrl(adev, false);
7123 	mutex_lock(&adev->srbm_mutex);
7124 	for (i = 0; i < adev->gfx.me.num_me; i++) {
7125 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
7126 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
7127 				soc21_grbm_select(adev, i, j, k, 0);
7128 
7129 				for (reg = 0; reg < reg_count; reg++) {
7130 					adev->gfx.ip_dump_gfx_queues[index + reg] =
7131 						RREG32(SOC15_REG_ENTRY_OFFSET(
7132 							gc_gfx_queue_reg_list_11[reg]));
7133 				}
7134 				index += reg_count;
7135 			}
7136 		}
7137 	}
7138 	soc21_grbm_select(adev, 0, 0, 0, 0);
7139 	mutex_unlock(&adev->srbm_mutex);
7140 	amdgpu_gfx_off_ctrl(adev, true);
7141 }
7142 
7143 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
7144 {
7145 	/* Emit the cleaner shader */
7146 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
7147 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
7148 }
7149 
7150 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring)
7151 {
7152 	amdgpu_gfx_profile_ring_begin_use(ring);
7153 
7154 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
7155 }
7156 
7157 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring)
7158 {
7159 	amdgpu_gfx_profile_ring_end_use(ring);
7160 
7161 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
7162 }
7163 
7164 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
7165 	.name = "gfx_v11_0",
7166 	.early_init = gfx_v11_0_early_init,
7167 	.late_init = gfx_v11_0_late_init,
7168 	.sw_init = gfx_v11_0_sw_init,
7169 	.sw_fini = gfx_v11_0_sw_fini,
7170 	.hw_init = gfx_v11_0_hw_init,
7171 	.hw_fini = gfx_v11_0_hw_fini,
7172 	.suspend = gfx_v11_0_suspend,
7173 	.resume = gfx_v11_0_resume,
7174 	.is_idle = gfx_v11_0_is_idle,
7175 	.wait_for_idle = gfx_v11_0_wait_for_idle,
7176 	.soft_reset = gfx_v11_0_soft_reset,
7177 	.check_soft_reset = gfx_v11_0_check_soft_reset,
7178 	.post_soft_reset = gfx_v11_0_post_soft_reset,
7179 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
7180 	.set_powergating_state = gfx_v11_0_set_powergating_state,
7181 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
7182 	.dump_ip_state = gfx_v11_ip_dump,
7183 	.print_ip_state = gfx_v11_ip_print,
7184 };
7185 
7186 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7187 	.type = AMDGPU_RING_TYPE_GFX,
7188 	.align_mask = 0xff,
7189 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7190 	.support_64bit_ptrs = true,
7191 	.secure_submission_supported = true,
7192 	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
7193 	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
7194 	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* 247 dwords maximum in total if 16 IBs */
7196 		5 + /* update_spm_vmid */
7197 		5 + /* COND_EXEC */
7198 		22 + /* SET_Q_PREEMPTION_MODE */
7199 		7 + /* PIPELINE_SYNC */
7200 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7201 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7202 		4 + /* VM_FLUSH */
7203 		8 + /* FENCE for VM_FLUSH */
7204 		20 + /* GDS switch */
7205 		5 + /* COND_EXEC */
7206 		7 + /* HDP_flush */
7207 		4 + /* VGT_flush */
		31 + /* DE_META */
7209 		3 + /* CNTX_CTRL */
7210 		5 + /* HDP_INVL */
7211 		22 + /* SET_Q_PREEMPTION_MODE */
7212 		8 + 8 + /* FENCE x2 */
7213 		8 + /* gfx_v11_0_emit_mem_sync */
7214 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
7215 	.emit_ib_size =	4, /* gfx_v11_0_ring_emit_ib_gfx */
7216 	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
7217 	.emit_fence = gfx_v11_0_ring_emit_fence,
7218 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
7219 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
7220 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
7221 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
7222 	.test_ring = gfx_v11_0_ring_test_ring,
7223 	.test_ib = gfx_v11_0_ring_test_ib,
7224 	.insert_nop = gfx_v11_ring_insert_nop,
7225 	.pad_ib = amdgpu_ring_generic_pad_ib,
7226 	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
7227 	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
7228 	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
7229 	.preempt_ib = gfx_v11_0_ring_preempt_ib,
7230 	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
7231 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7232 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7233 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7234 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
7235 	.reset = gfx_v11_0_reset_kgq,
7236 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
7237 	.begin_use = gfx_v11_0_ring_begin_use,
7238 	.end_use = gfx_v11_0_ring_end_use,
7239 };
7240 
7241 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
7242 	.type = AMDGPU_RING_TYPE_COMPUTE,
7243 	.align_mask = 0xff,
7244 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7245 	.support_64bit_ptrs = true,
7246 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
7247 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
7248 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
7249 	.emit_frame_size =
7250 		5 + /* update_spm_vmid */
7251 		20 + /* gfx_v11_0_ring_emit_gds_switch */
7252 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
7253 		5 + /* hdp invalidate */
7254 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
7255 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7256 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7257 		2 + /* gfx_v11_0_ring_emit_vm_flush */
7258 		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
7259 		8 + /* gfx_v11_0_emit_mem_sync */
7260 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
7261 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
7262 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
7263 	.emit_fence = gfx_v11_0_ring_emit_fence,
7264 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
7265 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
7266 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
7267 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
7268 	.test_ring = gfx_v11_0_ring_test_ring,
7269 	.test_ib = gfx_v11_0_ring_test_ib,
7270 	.insert_nop = gfx_v11_ring_insert_nop,
7271 	.pad_ib = amdgpu_ring_generic_pad_ib,
7272 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7273 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7274 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7275 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
7276 	.reset = gfx_v11_0_reset_kcq,
7277 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
7278 	.begin_use = gfx_v11_0_ring_begin_use,
7279 	.end_use = gfx_v11_0_ring_end_use,
7280 };
7281 
7282 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7283 	.type = AMDGPU_RING_TYPE_KIQ,
7284 	.align_mask = 0xff,
7285 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
7286 	.support_64bit_ptrs = true,
7287 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
7288 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
7289 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
7290 	.emit_frame_size =
7291 		20 + /* gfx_v11_0_ring_emit_gds_switch */
7292 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
7294 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
7295 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
7296 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
7297 		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
7298 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
7299 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
7300 	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
7301 	.test_ring = gfx_v11_0_ring_test_ring,
7302 	.test_ib = gfx_v11_0_ring_test_ib,
7303 	.insert_nop = amdgpu_ring_insert_nop,
7304 	.pad_ib = amdgpu_ring_generic_pad_ib,
7305 	.emit_rreg = gfx_v11_0_ring_emit_rreg,
7306 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7307 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7308 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7309 };
7310 
7311 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
7312 {
7313 	int i;
7314 
7315 	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
7316 
7317 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7318 		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
7319 
7320 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7321 		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
7322 }
7323 
7324 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
7325 	.set = gfx_v11_0_set_eop_interrupt_state,
7326 	.process = gfx_v11_0_eop_irq,
7327 };
7328 
7329 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
7330 	.set = gfx_v11_0_set_priv_reg_fault_state,
7331 	.process = gfx_v11_0_priv_reg_irq,
7332 };
7333 
7334 static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
7335 	.set = gfx_v11_0_set_bad_op_fault_state,
7336 	.process = gfx_v11_0_bad_op_irq,
7337 };
7338 
7339 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
7340 	.set = gfx_v11_0_set_priv_inst_fault_state,
7341 	.process = gfx_v11_0_priv_inst_irq,
7342 };
7343 
7344 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
7345 	.process = gfx_v11_0_rlc_gc_fed_irq,
7346 };
7347 
7348 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
7349 {
7350 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7351 	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;
7352 
7353 	adev->gfx.priv_reg_irq.num_types = 1;
7354 	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
7355 
7356 	adev->gfx.bad_op_irq.num_types = 1;
7357 	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
7358 
7359 	adev->gfx.priv_inst_irq.num_types = 1;
7360 	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
7361 
7362 	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}
7366 
7367 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
7368 {
7369 	if (adev->flags & AMD_IS_APU)
7370 		adev->gfx.imu.mode = MISSION_MODE;
7371 	else
7372 		adev->gfx.imu.mode = DEBUG_MODE;
7373 
7374 	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
7375 }
7376 
7377 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
7378 {
7379 	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
7380 }
7381 
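/*
 * Initialize the GDS (global data share) parameters: a 4 KiB GDS
 * allocation, a GWS size of 64, an OA size of 16, and a maximum compute
 * wave id derived from the total CU count (32 waves per CU).
 */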
7382 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
7383 {
7384 	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
7385 			    adev->gfx.config.max_sh_per_se *
7386 			    adev->gfx.config.max_shader_engines;
7387 
7388 	adev->gds.gds_size = 0x1000;
7389 	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
7390 	adev->gds.gws_size = 64;
7391 	adev->gds.oa_size = 16;
7392 }
7393 
7394 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
7395 {
7396 	/* set gfx eng mqd */
7397 	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
7398 		sizeof(struct v11_gfx_mqd);
7399 	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
7400 		gfx_v11_0_gfx_mqd_init;
7401 	/* set compute eng mqd */
7402 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
7403 		sizeof(struct v11_compute_mqd);
7404 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
7405 		gfx_v11_0_compute_mqd_init;
7406 }
7407 
7408 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
7409 							  u32 bitmap)
7410 {
7411 	u32 data;
7412 
7413 	if (!bitmap)
7414 		return;
7415 
7416 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7417 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7418 
7419 	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
7420 }
7421 
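/*
 * Derive the active WGP bitmap for the current shader array: OR the
 * hardware (CC) and user (GC_USER) inactive WGP masks, invert the
 * result, and clamp it to the number of WGPs per SH (two CUs per WGP,
 * hence max_cu_per_sh / 2).
 */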
7422 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
7423 {
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
7426 	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
7427 
7428 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7429 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7430 
7431 	wgp_bitmask =
7432 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
7433 
7434 	return (~data) & wgp_bitmask;
7435 }
7436 
7437 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
7438 {
7439 	u32 wgp_idx, wgp_active_bitmap;
7440 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
7441 
7442 	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
7443 	cu_active_bitmap = 0;
7444 
7445 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* one enabled WGP means two enabled CUs */
7447 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
7448 		if (wgp_active_bitmap & (1 << wgp_idx))
7449 			cu_active_bitmap |= cu_bitmap_per_wgp;
7450 	}
7451 
7452 	return cu_active_bitmap;
7453 }
7454 
7455 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
7456 				 struct amdgpu_cu_info *cu_info)
7457 {
7458 	int i, j, k, counter, active_cu_number = 0;
7459 	u32 mask, bitmap;
7460 	unsigned disable_masks[8 * 2];
7461 
7462 	if (!adev || !cu_info)
7463 		return -EINVAL;
7464 
7465 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
7466 
7467 	mutex_lock(&adev->grbm_idx_mutex);
7468 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7469 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7470 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
7471 			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
7472 				continue;
7473 			mask = 1;
7474 			counter = 0;
7475 			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
7476 			if (i < 8 && j < 2)
7477 				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
7478 					adev, disable_masks[i * 2 + j]);
7479 			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);
7480 
			/*
			 * GFX11 can support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So the last two columns of the bitmap are used to
			 * store the cu mask for SEs 4 to 7; the layout of the
			 * bitmap is as below:
7487 			 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
7488 			 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
7489 			 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
7490 			 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
7491 			 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
7492 			 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
7493 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
7494 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
7495 			 */
7496 			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
7497 
7498 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7499 				if (bitmap & mask)
7500 					counter++;
7501 
7502 				mask <<= 1;
7503 			}
7504 			active_cu_number += counter;
7505 		}
7506 	}
7507 	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7508 	mutex_unlock(&adev->grbm_idx_mutex);
7509 
7510 	cu_info->number = active_cu_number;
7511 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7512 
7513 	return 0;
7514 }
7515 
const struct amdgpu_ip_block_version gfx_v11_0_ip_block = {
7518 	.type = AMD_IP_BLOCK_TYPE_GFX,
7519 	.major = 11,
7520 	.minor = 0,
7521 	.rev = 0,
7522 	.funcs = &gfx_v11_0_ip_funcs,
7523 };
7524