1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "imu_v11_0.h"
34 #include "soc21.h"
35 #include "nvd.h"
36 
37 #include "gc/gc_11_0_0_offset.h"
38 #include "gc/gc_11_0_0_sh_mask.h"
39 #include "smuio/smuio_13_0_6_offset.h"
40 #include "smuio/smuio_13_0_6_sh_mask.h"
41 #include "navi10_enum.h"
42 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
43 
44 #include "soc15.h"
45 #include "soc15d.h"
46 #include "clearstate_gfx11.h"
47 #include "v11_structs.h"
48 #include "gfx_v11_0.h"
49 #include "gfx_v11_0_cleaner_shader.h"
50 #include "gfx_v11_0_3.h"
51 #include "nbio_v4_3.h"
52 #include "mes_v11_0.h"
53 
54 #define GFX11_NUM_GFX_RINGS		1
55 #define GFX11_MEC_HPD_SIZE	2048
56 
57 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
58 #define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388
59 
60 #define regCGTT_WD_CLK_CTRL		0x5086
61 #define regCGTT_WD_CLK_CTRL_BASE_IDX	1
62 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
63 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
64 #define regPC_CONFIG_CNTL_1		0x194d
65 #define regPC_CONFIG_CNTL_1_BASE_IDX	1
66 
67 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
68 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
69 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
70 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
71 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
72 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
73 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
74 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
75 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
76 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
77 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
78 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
79 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
80 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
81 MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
82 MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
83 MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
84 MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
85 MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
86 MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
87 MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
88 MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
89 MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
90 MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
91 MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
92 MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
93 MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
94 MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
95 MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
96 MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
97 MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
98 MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
99 MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
100 MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");
101 MODULE_FIRMWARE("amdgpu/gc_11_5_3_pfp.bin");
102 MODULE_FIRMWARE("amdgpu/gc_11_5_3_me.bin");
103 MODULE_FIRMWARE("amdgpu/gc_11_5_3_mec.bin");
104 MODULE_FIRMWARE("amdgpu/gc_11_5_3_rlc.bin");
105 
106 static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
107 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
108 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
109 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
110 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
111 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
112 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
113 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
114 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
115 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
116 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
117 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
118 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
119 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
120 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
121 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
122 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
123 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
124 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
125 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
126 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
127 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
128 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
139 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
140 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
143 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
144 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
145 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
146 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
147 	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
148 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
149 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
150 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
151 	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
152 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
153 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
154 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS),
155 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
156 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
157 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
158 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
159 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
160 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
161 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
162 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
163 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
164 	/* cp header registers */
165 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
166 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
167 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
168 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
169 	/* SE status registers */
170 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
171 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
172 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
173 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
174 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
175 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
176 };
177 
178 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
179 	/* compute registers */
180 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
181 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
182 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
183 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
184 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
185 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
186 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
187 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
188 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
189 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
190 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
191 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
192 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
193 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
194 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
195 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
196 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
197 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
198 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
199 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
200 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
201 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
202 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
203 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
204 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
205 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
206 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
207 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
208 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
209 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
210 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
211 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
212 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
213 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
214 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
215 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
216 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
217 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
218 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
219 };
220 
221 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
222 	/* gfx queue registers */
223 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
224 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
225 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
226 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
227 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
228 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
229 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
230 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
231 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
232 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
233 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
234 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
235 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
236 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
237 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
238 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
239 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
240 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
241 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
242 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
243 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
244 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
245 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
246 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
247 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
248 };
249 
250 static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
251 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
252 };
253 
254 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
255 {
256 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
257 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
258 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
259 	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
260 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
261 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
262 	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
263 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
264 	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
265 };
266 
267 #define DEFAULT_SH_MEM_CONFIG \
268 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
269 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
270 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
271 
272 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
273 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
274 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
275 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
276 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
277 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
278 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
279 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
280                                  struct amdgpu_cu_info *cu_info);
281 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
282 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
283 				   u32 sh_num, u32 instance, int xcc_id);
284 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
285 
286 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
287 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
288 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
289 				     uint32_t val);
290 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
291 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
292 					   uint16_t pasid, uint32_t flush_type,
293 					   bool all_hub, uint8_t dst_sel);
294 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
295 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
296 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
297 				      bool enable);
298 
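/*
 * gfx11_kiq_set_resources - hand queue resources to the KIQ
 *
 * Emits a PACKET3_SET_RESOURCES packet carrying the compute queue mask and
 * the cleaner shader MC address (GPU address >> 8).
 */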
299 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
300 {
301 	struct amdgpu_device *adev = kiq_ring->adev;
302 	u64 shader_mc_addr;
303 
304 	/* Cleaner shader MC address */
305 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
306 
307 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
308 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
309 			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
310 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
311 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
312 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
313 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
314 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
315 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
316 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
317 }
318 
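/*
 * gfx11_kiq_map_queues - ask the KIQ to map a ring's hardware queue
 *
 * Picks the ME and engine select from the ring type (compute, gfx or MES)
 * and emits a PACKET3_MAP_QUEUES packet with the ring's pipe/queue,
 * doorbell offset, MQD address and wptr polling address.
 */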
319 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
320 				 struct amdgpu_ring *ring)
321 {
322 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
323 	uint64_t wptr_addr = ring->wptr_gpu_addr;
324 	uint32_t me = 0, eng_sel = 0;
325 
326 	switch (ring->funcs->type) {
327 	case AMDGPU_RING_TYPE_COMPUTE:
328 		me = 1;
329 		eng_sel = 0;
330 		break;
331 	case AMDGPU_RING_TYPE_GFX:
332 		me = 0;
333 		eng_sel = 4;
334 		break;
335 	case AMDGPU_RING_TYPE_MES:
336 		me = 2;
337 		eng_sel = 5;
338 		break;
339 	default:
340 		WARN_ON(1);
341 	}
342 
343 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
344 	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
345 	amdgpu_ring_write(kiq_ring,
346 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
347 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
348 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
349 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
350 			  PACKET3_MAP_QUEUES_ME((me)) |
351 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
352 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
353 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
354 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
355 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
356 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
357 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
358 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
359 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
360 }
361 
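/*
 * gfx11_kiq_unmap_queues - ask the KIQ to unmap or preempt a queue
 *
 * Falls back to amdgpu_mes_unmap_legacy_queue() when MES is enabled and the
 * KIQ ring isn't ready.  For PREEMPT_QUEUES_NO_UNMAP the trailing dwords
 * carry the supplied gpu_addr and seq; otherwise they are zeroed.
 */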
362 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
363 				   struct amdgpu_ring *ring,
364 				   enum amdgpu_unmap_queues_action action,
365 				   u64 gpu_addr, u64 seq)
366 {
367 	struct amdgpu_device *adev = kiq_ring->adev;
368 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
369 
370 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
371 		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
372 		return;
373 	}
374 
375 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
376 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
377 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
378 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
379 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
380 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
381 	amdgpu_ring_write(kiq_ring,
382 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
383 
384 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
385 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
386 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
387 		amdgpu_ring_write(kiq_ring, seq);
388 	} else {
389 		amdgpu_ring_write(kiq_ring, 0);
390 		amdgpu_ring_write(kiq_ring, 0);
391 		amdgpu_ring_write(kiq_ring, 0);
392 	}
393 }
394 
395 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
396 				   struct amdgpu_ring *ring,
397 				   u64 addr,
398 				   u64 seq)
399 {
400 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
401 
402 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
403 	amdgpu_ring_write(kiq_ring,
404 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
405 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
406 			  PACKET3_QUERY_STATUS_COMMAND(2));
407 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
408 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
409 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
410 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
411 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
412 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
413 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
414 }
415 
416 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
417 				uint16_t pasid, uint32_t flush_type,
418 				bool all_hub)
419 {
420 	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
421 }
422 
423 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
424 	.kiq_set_resources = gfx11_kiq_set_resources,
425 	.kiq_map_queues = gfx11_kiq_map_queues,
426 	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
427 	.kiq_query_status = gfx11_kiq_query_status,
428 	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
429 	.set_resources_size = 8,
430 	.map_queues_size = 7,
431 	.unmap_queues_size = 6,
432 	.query_status_size = 7,
433 	.invalidate_tlbs_size = 2,
434 };
435 
436 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
437 {
438 	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
439 }
440 
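/*
 * gfx_v11_0_init_golden_registers - program the GC golden register settings
 *
 * Skipped under SR-IOV.  GC 11.0.1 and 11.0.4 get an additional per-IP
 * sequence before the common gfx11 golden settings are applied.
 */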
441 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
442 {
443 	if (amdgpu_sriov_vf(adev))
444 		return;
445 
446 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
447 	case IP_VERSION(11, 0, 1):
448 	case IP_VERSION(11, 0, 4):
449 		soc15_program_register_sequence(adev,
450 						golden_settings_gc_11_0_1,
451 						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
452 		break;
453 	default:
454 		break;
455 	}
456 	soc15_program_register_sequence(adev,
457 					golden_settings_gc_11_0,
458 					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
459 
460 }
461 
462 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
463 				       bool wc, uint32_t reg, uint32_t val)
464 {
465 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
466 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
467 			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
468 	amdgpu_ring_write(ring, reg);
469 	amdgpu_ring_write(ring, 0);
470 	amdgpu_ring_write(ring, val);
471 }
472 
473 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
474 				  int mem_space, int opt, uint32_t addr0,
475 				  uint32_t addr1, uint32_t ref, uint32_t mask,
476 				  uint32_t inv)
477 {
478 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
479 	amdgpu_ring_write(ring,
480 			  /* memory (1) or register (0) */
481 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
482 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
483 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
484 			   WAIT_REG_MEM_ENGINE(eng_sel)));
485 
486 	if (mem_space)
487 		BUG_ON(addr0 & 0x3); /* Dword align */
488 	amdgpu_ring_write(ring, addr0);
489 	amdgpu_ring_write(ring, addr1);
490 	amdgpu_ring_write(ring, ref);
491 	amdgpu_ring_write(ring, mask);
492 	amdgpu_ring_write(ring, inv); /* poll interval */
493 }
494 
495 static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
496 {
497 	/* Header itself is a NOP packet */
498 	if (num_nop == 1) {
499 		amdgpu_ring_write(ring, ring->funcs->nop);
500 		return;
501 	}
502 
503 	/* One NOP packet can absorb up to 0x3ffe dwords; the remaining NOPs are emitted one at a time */
504 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
505 
506 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
507 	amdgpu_ring_insert_nop(ring, num_nop - 1);
508 }
509 
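/*
 * gfx_v11_0_ring_test_ring - basic ring aliveness test
 *
 * Seeds SCRATCH_REG0 with 0xCAFEDEAD, emits a packet that writes 0xDEADBEEF
 * back to it (WREG for KIQ rings, SET_UCONFIG_REG otherwise) and polls the
 * register until the value lands or the usec timeout expires.
 */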
510 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
511 {
512 	struct amdgpu_device *adev = ring->adev;
513 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
514 	uint32_t tmp = 0;
515 	unsigned i;
516 	int r;
517 
518 	WREG32(scratch, 0xCAFEDEAD);
519 	r = amdgpu_ring_alloc(ring, 5);
520 	if (r) {
521 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
522 			  ring->idx, r);
523 		return r;
524 	}
525 
526 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
527 		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
528 	} else {
529 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
530 		amdgpu_ring_write(ring, scratch -
531 				  PACKET3_SET_UCONFIG_REG_START);
532 		amdgpu_ring_write(ring, 0xDEADBEEF);
533 	}
534 	amdgpu_ring_commit(ring);
535 
536 	for (i = 0; i < adev->usec_timeout; i++) {
537 		tmp = RREG32(scratch);
538 		if (tmp == 0xDEADBEEF)
539 			break;
540 		if (amdgpu_emu_mode == 1)
541 			msleep(1);
542 		else
543 			udelay(1);
544 	}
545 
546 	if (i >= adev->usec_timeout)
547 		r = -ETIMEDOUT;
548 	return r;
549 }
550 
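/*
 * gfx_v11_0_ring_test_ib - indirect buffer test
 *
 * Builds a 5-dword WRITE_DATA IB that stores 0xDEADBEEF to a writeback slot
 * (or to the MES context padding area for MES queues), schedules it, waits
 * for the fence and then checks that the value reached memory.
 */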
551 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
552 {
553 	struct amdgpu_device *adev = ring->adev;
554 	struct amdgpu_ib ib;
555 	struct dma_fence *f = NULL;
556 	unsigned index;
557 	uint64_t gpu_addr;
558 	volatile uint32_t *cpu_ptr;
559 	long r;
560 
561 	/* MES KIQ fw doesn't have indirect buffer support for now */
562 	if (adev->enable_mes_kiq &&
563 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
564 		return 0;
565 
566 	memset(&ib, 0, sizeof(ib));
567 
568 	if (ring->is_mes_queue) {
569 		uint32_t padding, offset;
570 
571 		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
572 		padding = amdgpu_mes_ctx_get_offs(ring,
573 						  AMDGPU_MES_CTX_PADDING_OFFS);
574 
575 		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
576 		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
577 
578 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
579 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
580 		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
581 	} else {
582 		r = amdgpu_device_wb_get(adev, &index);
583 		if (r)
584 			return r;
585 
586 		gpu_addr = adev->wb.gpu_addr + (index * 4);
587 		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
588 		cpu_ptr = &adev->wb.wb[index];
589 
590 		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
591 		if (r) {
592 			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
593 			goto err1;
594 		}
595 	}
596 
597 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
598 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
599 	ib.ptr[2] = lower_32_bits(gpu_addr);
600 	ib.ptr[3] = upper_32_bits(gpu_addr);
601 	ib.ptr[4] = 0xDEADBEEF;
602 	ib.length_dw = 5;
603 
604 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
605 	if (r)
606 		goto err2;
607 
608 	r = dma_fence_wait_timeout(f, false, timeout);
609 	if (r == 0) {
610 		r = -ETIMEDOUT;
611 		goto err2;
612 	} else if (r < 0) {
613 		goto err2;
614 	}
615 
616 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
617 		r = 0;
618 	else
619 		r = -EINVAL;
620 err2:
621 	if (!ring->is_mes_queue)
622 		amdgpu_ib_free(&ib, NULL);
623 	dma_fence_put(f);
624 err1:
625 	if (!ring->is_mes_queue)
626 		amdgpu_device_wb_free(adev, index);
627 	return r;
628 }
629 
630 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
631 {
632 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
633 	amdgpu_ucode_release(&adev->gfx.me_fw);
634 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
635 	amdgpu_ucode_release(&adev->gfx.mec_fw);
636 
637 	kfree(adev->gfx.rlc.register_list_format);
638 }
639 
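/*
 * gfx_v11_0_init_toc_microcode - request and parse the TOC firmware
 *
 * Loads amdgpu/<prefix>_toc.bin and fills in the PSP TOC version, size and
 * start address from its header; the firmware is released on failure.
 */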
640 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
641 {
642 	const struct psp_firmware_header_v1_0 *toc_hdr;
643 	int err = 0;
644 
645 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
646 				   AMDGPU_UCODE_REQUIRED,
647 				   "amdgpu/%s_toc.bin", ucode_prefix);
648 	if (err)
649 		goto out;
650 
651 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
652 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
653 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
654 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
655 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
656 				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
657 	return 0;
658 out:
659 	amdgpu_ucode_release(&adev->psp.toc_fw);
660 	return err;
661 }
662 
663 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
664 {
665 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
666 	case IP_VERSION(11, 0, 0):
667 	case IP_VERSION(11, 0, 2):
668 	case IP_VERSION(11, 0, 3):
669 		if ((adev->gfx.me_fw_version >= 1505) &&
670 		    (adev->gfx.pfp_fw_version >= 1600) &&
671 		    (adev->gfx.mec_fw_version >= 512)) {
672 			if (amdgpu_sriov_vf(adev))
673 				adev->gfx.cp_gfx_shadow = true;
674 			else
675 				adev->gfx.cp_gfx_shadow = false;
676 		}
677 		break;
678 	default:
679 		adev->gfx.cp_gfx_shadow = false;
680 		break;
681 	}
682 }
683 
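/*
 * gfx_v11_0_init_microcode - request all gfx11 CP/RLC firmware
 *
 * Requests the PFP, ME, RLC (bare metal only) and MEC images, detects RS64
 * support from the PFP header version, optionally loads the TOC for RLC
 * backdoor autoload, probes the CP gfx shadow capability and initializes
 * IMU microcode when an IMU is present.
 */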
684 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
685 {
686 	char ucode_prefix[25];
687 	int err;
688 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
689 	uint16_t version_major;
690 	uint16_t version_minor;
691 
692 	DRM_DEBUG("\n");
693 
694 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
695 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
696 				   AMDGPU_UCODE_REQUIRED,
697 				   "amdgpu/%s_pfp.bin", ucode_prefix);
698 	if (err)
699 		goto out;
700 	/* check the pfp fw hdr version to decide whether to enable rs64 for gfx11 */
701 	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
702 				(union amdgpu_firmware_header *)
703 				adev->gfx.pfp_fw->data, 2, 0);
704 	if (adev->gfx.rs64_enable) {
705 		dev_info(adev->dev, "CP RS64 enable\n");
706 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
707 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
708 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
709 	} else {
710 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
711 	}
712 
713 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
714 				   AMDGPU_UCODE_REQUIRED,
715 				   "amdgpu/%s_me.bin", ucode_prefix);
716 	if (err)
717 		goto out;
718 	if (adev->gfx.rs64_enable) {
719 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
720 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
721 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
722 	} else {
723 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
724 	}
725 
726 	if (!amdgpu_sriov_vf(adev)) {
727 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
728 		    adev->pdev->revision == 0xCE)
729 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
730 						   AMDGPU_UCODE_REQUIRED,
731 						   "amdgpu/gc_11_0_0_rlc_1.bin");
732 		else
733 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
734 						   AMDGPU_UCODE_REQUIRED,
735 						   "amdgpu/%s_rlc.bin", ucode_prefix);
736 		if (err)
737 			goto out;
738 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
739 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
740 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
741 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
742 		if (err)
743 			goto out;
744 	}
745 
746 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
747 				   AMDGPU_UCODE_REQUIRED,
748 				   "amdgpu/%s_mec.bin", ucode_prefix);
749 	if (err)
750 		goto out;
751 	if (adev->gfx.rs64_enable) {
752 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
753 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
754 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
755 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
756 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
757 	} else {
758 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
759 		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
760 	}
761 
762 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
763 		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);
764 
765 	/* only one MEC for gfx 11 */
766 	adev->gfx.mec2_fw = NULL;
767 
768 	gfx_v11_0_check_fw_cp_gfx_shadow(adev);
769 
770 	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
771 		err = adev->gfx.imu.funcs->init_microcode(adev);
772 		if (err)
773 			DRM_ERROR("Failed to init imu firmware!\n");
774 		return err;
775 	}
776 
777 out:
778 	if (err) {
779 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
780 		amdgpu_ucode_release(&adev->gfx.me_fw);
781 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
782 		amdgpu_ucode_release(&adev->gfx.mec_fw);
783 	}
784 
785 	return err;
786 }
787 
788 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
789 {
790 	u32 count = 0;
791 	const struct cs_section_def *sect = NULL;
792 	const struct cs_extent_def *ext = NULL;
793 
794 	/* begin clear state */
795 	count += 2;
796 	/* context control state */
797 	count += 3;
798 
799 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
800 		for (ext = sect->section; ext->extent != NULL; ++ext) {
801 			if (sect->id == SECT_CONTEXT)
802 				count += 2 + ext->reg_count;
803 			else
804 				return 0;
805 		}
806 	}
807 
808 	/* set PA_SC_TILE_STEERING_OVERRIDE */
809 	count += 3;
810 	/* end clear state */
811 	count += 2;
812 	/* clear state */
813 	count += 2;
814 
815 	return count;
816 }
817 
818 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
819 				    volatile u32 *buffer)
820 {
821 	u32 count = 0, i;
822 	const struct cs_section_def *sect = NULL;
823 	const struct cs_extent_def *ext = NULL;
824 	int ctx_reg_offset;
825 
826 	if (adev->gfx.rlc.cs_data == NULL)
827 		return;
828 	if (buffer == NULL)
829 		return;
830 
831 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
832 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
833 
834 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
835 	buffer[count++] = cpu_to_le32(0x80000000);
836 	buffer[count++] = cpu_to_le32(0x80000000);
837 
838 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
839 		for (ext = sect->section; ext->extent != NULL; ++ext) {
840 			if (sect->id == SECT_CONTEXT) {
841 				buffer[count++] =
842 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
843 				buffer[count++] = cpu_to_le32(ext->reg_index -
844 						PACKET3_SET_CONTEXT_REG_START);
845 				for (i = 0; i < ext->reg_count; i++)
846 					buffer[count++] = cpu_to_le32(ext->extent[i]);
847 			} else {
848 				return;
849 			}
850 		}
851 	}
852 
853 	ctx_reg_offset =
854 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
855 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
856 	buffer[count++] = cpu_to_le32(ctx_reg_offset);
857 	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
858 
859 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
860 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
861 
862 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
863 	buffer[count++] = cpu_to_le32(0);
864 }
865 
866 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
867 {
868 	/* clear state block */
869 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
870 			&adev->gfx.rlc.clear_state_gpu_addr,
871 			(void **)&adev->gfx.rlc.cs_ptr);
872 
873 	/* jump table block */
874 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
875 			&adev->gfx.rlc.cp_table_gpu_addr,
876 			(void **)&adev->gfx.rlc.cp_table_ptr);
877 }
878 
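/*
 * gfx_v11_0_init_rlcg_reg_access_ctrl - set up RLCG indirect register access
 *
 * Records the scratch, GRBM_GFX_CNTL/INDEX and RLC spare interrupt register
 * offsets used for RLCG-assisted register access and marks the path as
 * supported.
 */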
879 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
880 {
881 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
882 
883 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
884 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
885 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
886 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
887 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
888 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
889 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
890 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
891 	adev->gfx.rlc.rlcg_reg_access_supported = true;
892 }
893 
894 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
895 {
896 	const struct cs_section_def *cs_data;
897 	int r;
898 
899 	adev->gfx.rlc.cs_data = gfx11_cs_data;
900 
901 	cs_data = adev->gfx.rlc.cs_data;
902 
903 	if (cs_data) {
904 		/* init clear state block */
905 		r = amdgpu_gfx_rlc_init_csb(adev);
906 		if (r)
907 			return r;
908 	}
909 
910 	/* init spm vmid with 0xf */
911 	if (adev->gfx.rlc.funcs->update_spm_vmid)
912 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
913 
914 	return 0;
915 }
916 
917 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
918 {
919 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
920 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
921 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
922 }
923 
924 static void gfx_v11_0_me_init(struct amdgpu_device *adev)
925 {
926 	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
927 
928 	amdgpu_gfx_graphics_queue_acquire(adev);
929 }
930 
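/*
 * gfx_v11_0_mec_init - allocate MEC resources
 *
 * Takes ownership of the compute queues and allocates the HPD EOP buffer
 * (GFX11_MEC_HPD_SIZE bytes per compute ring) in GTT, zero-initialized.
 */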
931 static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
932 {
933 	int r;
934 	u32 *hpd;
935 	size_t mec_hpd_size;
936 
937 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
938 
939 	/* take ownership of the relevant compute queues */
940 	amdgpu_gfx_compute_queue_acquire(adev);
941 	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;
942 
943 	if (mec_hpd_size) {
944 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
945 					      AMDGPU_GEM_DOMAIN_GTT,
946 					      &adev->gfx.mec.hpd_eop_obj,
947 					      &adev->gfx.mec.hpd_eop_gpu_addr,
948 					      (void **)&hpd);
949 		if (r) {
950 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
951 			gfx_v11_0_mec_fini(adev);
952 			return r;
953 		}
954 
955 		memset(hpd, 0, mec_hpd_size);
956 
957 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
958 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
959 	}
960 
961 	return 0;
962 }
963 
964 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
965 {
966 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
967 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
968 		(address << SQ_IND_INDEX__INDEX__SHIFT));
969 	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
970 }
971 
972 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
973 			   uint32_t thread, uint32_t regno,
974 			   uint32_t num, uint32_t *out)
975 {
976 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
977 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
978 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
979 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
980 		(SQ_IND_INDEX__AUTO_INCR_MASK));
981 	while (num--)
982 		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
983 }
984 
985 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
986 {
987 	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
988 	 * field when performing a select_se_sh so it should be
989 	 * zero here */
990 	WARN_ON(simd != 0);
991 
992 	/* type 3 wave data */
993 	dst[(*no_fields)++] = 3;
994 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
995 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
996 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
997 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
998 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
999 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
1000 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
1001 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
1002 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
1003 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
1004 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
1005 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
1006 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
1007 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
1008 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
1009 }
1010 
1011 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1012 				     uint32_t wave, uint32_t start,
1013 				     uint32_t size, uint32_t *dst)
1014 {
1015 	WARN_ON(simd != 0);
1016 
1017 	wave_read_regs(
1018 		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
1019 		dst);
1020 }
1021 
1022 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1023 				      uint32_t wave, uint32_t thread,
1024 				      uint32_t start, uint32_t size,
1025 				      uint32_t *dst)
1026 {
1027 	wave_read_regs(
1028 		adev, wave, thread,
1029 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1030 }
1031 
1032 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
1033 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
1034 {
1035 	soc21_grbm_select(adev, me, pipe, q, vm);
1036 }
1037 
1038 /* all sizes are in bytes */
1039 #define MQD_SHADOW_BASE_SIZE      73728
1040 #define MQD_SHADOW_BASE_ALIGNMENT 256
1041 #define MQD_FWWORKAREA_SIZE       484
1042 #define MQD_FWWORKAREA_ALIGNMENT  256
1043 
1044 static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
1045 					 struct amdgpu_gfx_shadow_info *shadow_info)
1046 {
1047 	if (adev->gfx.cp_gfx_shadow) {
1048 		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
1049 		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
1050 		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
1051 		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
1052 		return 0;
1053 	} else {
1054 		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
1055 		return -ENOTSUPP;
1056 	}
1057 }
1058 
1059 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
1060 	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
1061 	.select_se_sh = &gfx_v11_0_select_se_sh,
1062 	.read_wave_data = &gfx_v11_0_read_wave_data,
1063 	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
1064 	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
1065 	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
1066 	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
1067 	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
1068 };
1069 
1070 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
1071 {
1072 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1073 	case IP_VERSION(11, 0, 0):
1074 	case IP_VERSION(11, 0, 2):
1075 		adev->gfx.config.max_hw_contexts = 8;
1076 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1077 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1078 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
1079 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1080 		break;
1081 	case IP_VERSION(11, 0, 3):
1082 		adev->gfx.ras = &gfx_v11_0_3_ras;
1083 		adev->gfx.config.max_hw_contexts = 8;
1084 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1085 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1086 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
1087 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1088 		break;
1089 	case IP_VERSION(11, 0, 1):
1090 	case IP_VERSION(11, 0, 4):
1091 	case IP_VERSION(11, 5, 0):
1092 	case IP_VERSION(11, 5, 1):
1093 	case IP_VERSION(11, 5, 2):
1094 	case IP_VERSION(11, 5, 3):
1095 		adev->gfx.config.max_hw_contexts = 8;
1096 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1097 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1098 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
1099 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
1100 		break;
1101 	default:
1102 		BUG();
1103 		break;
1104 	}
1105 
1106 	return 0;
1107 }
1108 
1109 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
1110 				   int me, int pipe, int queue)
1111 {
1112 	struct amdgpu_ring *ring;
1113 	unsigned int irq_type;
1114 	unsigned int hw_prio;
1115 
1116 	ring = &adev->gfx.gfx_ring[ring_id];
1117 
1118 	ring->me = me;
1119 	ring->pipe = pipe;
1120 	ring->queue = queue;
1121 
1122 	ring->ring_obj = NULL;
1123 	ring->use_doorbell = true;
1124 
1125 	if (!ring_id)
1126 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1127 	else
1128 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
1129 	ring->vm_hub = AMDGPU_GFXHUB(0);
1130 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1131 
1132 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
1133 	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
1134 		AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1135 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1136 				hw_prio, NULL);
1137 }
1138 
1139 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1140 				       int mec, int pipe, int queue)
1141 {
1142 	int r;
1143 	unsigned irq_type;
1144 	struct amdgpu_ring *ring;
1145 	unsigned int hw_prio;
1146 
1147 	ring = &adev->gfx.compute_ring[ring_id];
1148 
1149 	/* mec0 is me1 */
1150 	ring->me = mec + 1;
1151 	ring->pipe = pipe;
1152 	ring->queue = queue;
1153 
1154 	ring->ring_obj = NULL;
1155 	ring->use_doorbell = true;
1156 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
1157 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1158 				+ (ring_id * GFX11_MEC_HPD_SIZE);
1159 	ring->vm_hub = AMDGPU_GFXHUB(0);
1160 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1161 
1162 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1163 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1164 		+ ring->pipe;
1165 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1166 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1167 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1168 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1169 			     hw_prio, NULL);
1170 	if (r)
1171 		return r;
1172 
1173 	return 0;
1174 }
1175 
1176 static struct {
1177 	SOC21_FIRMWARE_ID	id;
1178 	unsigned int		offset;
1179 	unsigned int		size;
1180 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];
1181 
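/*
 * gfx_v11_0_parse_rlc_toc - cache the RLC table of contents
 *
 * Walks the TOC entries and records each firmware's offset and size
 * (converted from dwords to bytes) in the rlc_autoload_info table.
 */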
1182 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
1183 {
1184 	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;
1185 
1186 	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
1187 			(ucode->id < SOC21_FIRMWARE_ID_MAX)) {
1188 		rlc_autoload_info[ucode->id].id = ucode->id;
1189 		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
1190 		rlc_autoload_info[ucode->id].size = ucode->size * 4;
1191 
1192 		ucode++;
1193 	}
1194 }
1195 
1196 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
1197 {
1198 	uint32_t total_size = 0;
1199 	SOC21_FIRMWARE_ID id;
1200 
1201 	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);
1202 
1203 	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
1204 		total_size += rlc_autoload_info[id].size;
1205 
1206 	/* In case the offset in rlc toc ucode is aligned */
1207 	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
1208 		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
1209 			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;
1210 
1211 	return total_size;
1212 }
1213 
1214 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
1215 {
1216 	int r;
1217 	uint32_t total_size;
1218 
1219 	total_size = gfx_v11_0_calc_toc_total_size(adev);
1220 
1221 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
1222 				      AMDGPU_GEM_DOMAIN_VRAM |
1223 				      AMDGPU_GEM_DOMAIN_GTT,
1224 				      &adev->gfx.rlc.rlc_autoload_bo,
1225 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
1226 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1227 
1228 	if (r) {
1229 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1230 		return r;
1231 	}
1232 
1233 	return 0;
1234 }
1235 
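/*
 * gfx_v11_0_rlc_backdoor_autoload_copy_ucode - stage one firmware image
 *
 * Copies the image into its TOC slot inside the autoload buffer, clamps and
 * zero-pads it to the slot size, and sets the firmware's bit in the autoload
 * mask (except for the RS64 PFP/ME instruction images).
 */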
1236 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1237 					      SOC21_FIRMWARE_ID id,
1238 					      const void *fw_data,
1239 					      uint32_t fw_size,
1240 					      uint32_t *fw_autoload_mask)
1241 {
1242 	uint32_t toc_offset;
1243 	uint32_t toc_fw_size;
1244 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1245 
1246 	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
1247 		return;
1248 
1249 	toc_offset = rlc_autoload_info[id].offset;
1250 	toc_fw_size = rlc_autoload_info[id].size;
1251 
1252 	if (fw_size == 0)
1253 		fw_size = toc_fw_size;
1254 
1255 	if (fw_size > toc_fw_size)
1256 		fw_size = toc_fw_size;
1257 
1258 	memcpy(ptr + toc_offset, fw_data, fw_size);
1259 
1260 	if (fw_size < toc_fw_size)
1261 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1262 
1263 	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
1264 		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
1265 }
1266 
1267 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
1268 							uint32_t *fw_autoload_mask)
1269 {
1270 	void *data;
1271 	uint32_t size;
1272 	uint64_t *toc_ptr;
1273 
1274 	*(uint64_t *)fw_autoload_mask |= 0x1;
1275 
1276 	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);
1277 
1278 	data = adev->psp.toc.start_addr;
1279 	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;
1280 
1281 	toc_ptr = (uint64_t *)data + size / 8 - 1;
1282 	*toc_ptr = *(uint64_t *)fw_autoload_mask;
1283 
1284 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
1285 					data, size, fw_autoload_mask);
1286 }
1287 
1288 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
1289 							uint32_t *fw_autoload_mask)
1290 {
1291 	const __le32 *fw_data;
1292 	uint32_t fw_size;
1293 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1294 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1295 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1296 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1297 	uint16_t version_major, version_minor;
1298 
1299 	if (adev->gfx.rs64_enable) {
1300 		/* pfp ucode */
1301 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1302 			adev->gfx.pfp_fw->data;
1303 		/* instruction */
1304 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1305 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1306 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1307 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
1308 						fw_data, fw_size, fw_autoload_mask);
1309 		/* data */
1310 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1311 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1312 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1313 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
1314 						fw_data, fw_size, fw_autoload_mask);
1315 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
1316 						fw_data, fw_size, fw_autoload_mask);
1317 		/* me ucode */
1318 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1319 			adev->gfx.me_fw->data;
1320 		/* instruction */
1321 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1322 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1323 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1324 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
1325 						fw_data, fw_size, fw_autoload_mask);
1326 		/* data */
1327 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1328 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1329 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1330 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
1331 						fw_data, fw_size, fw_autoload_mask);
1332 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
1333 						fw_data, fw_size, fw_autoload_mask);
1334 		/* mec ucode */
1335 		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1336 			adev->gfx.mec_fw->data;
1337 		/* instruction */
1338 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1339 			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1340 		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1341 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
1342 						fw_data, fw_size, fw_autoload_mask);
1343 		/* data */
1344 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1345 			le32_to_cpu(cpv2_hdr->data_offset_bytes));
1346 		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1347 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
1348 						fw_data, fw_size, fw_autoload_mask);
1349 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
1350 						fw_data, fw_size, fw_autoload_mask);
1351 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
1352 						fw_data, fw_size, fw_autoload_mask);
1353 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
1354 						fw_data, fw_size, fw_autoload_mask);
1355 	} else {
1356 		/* pfp ucode */
1357 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1358 			adev->gfx.pfp_fw->data;
1359 		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1360 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1361 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1362 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
1363 						fw_data, fw_size, fw_autoload_mask);
1364 
1365 		/* me ucode */
1366 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1367 			adev->gfx.me_fw->data;
1368 		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1369 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1370 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1371 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
1372 						fw_data, fw_size, fw_autoload_mask);
1373 
1374 		/* mec ucode */
1375 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1376 			adev->gfx.mec_fw->data;
1377 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1378 				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1379 		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1380 			cp_hdr->jt_size * 4;
1381 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
1382 						fw_data, fw_size, fw_autoload_mask);
1383 	}
1384 
1385 	/* rlc ucode */
1386 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1387 		adev->gfx.rlc_fw->data;
1388 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1389 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1390 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1391 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
1392 					fw_data, fw_size, fw_autoload_mask);
1393 
1394 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1395 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1396 	if (version_major == 2) {
1397 		if (version_minor >= 2) {
1398 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1399 
1400 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1401 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1402 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1403 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
1404 					fw_data, fw_size, fw_autoload_mask);
1405 
1406 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1407 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1408 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1409 			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
1410 					fw_data, fw_size, fw_autoload_mask);
1411 		}
1412 	}
1413 }
1414 
1415 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
1416 							uint32_t *fw_autoload_mask)
1417 {
1418 	const __le32 *fw_data;
1419 	uint32_t fw_size;
1420 	const struct sdma_firmware_header_v2_0 *sdma_hdr;
1421 
1422 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
1423 		adev->sdma.instance[0].fw->data;
1424 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1425 			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
1426 	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
1427 
1428 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1429 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);
1430 
1431 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1432 			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
1433 	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
1434 
1435 	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1436 			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
1437 }
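
/*
 * The v2 SDMA firmware blob carries both RS64 threads: the context (TH0)
 * ucode at the header's ucode_array_offset and the control (TH1) ucode at
 * ctl_ucode_offset.  Only instance 0's image is read here; the staged
 * copies are presumably shared with the other SDMA instances by the
 * autoload path.
 */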
1438 
1439 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
1440 							uint32_t *fw_autoload_mask)
1441 {
1442 	const __le32 *fw_data;
1443 	unsigned fw_size;
1444 	const struct mes_firmware_header_v1_0 *mes_hdr;
1445 	int pipe, ucode_id, data_id;
1446 
1447 	for (pipe = 0; pipe < 2; pipe++) {
1448 		if (pipe == 0) {

1449 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
1450 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
1451 		} else {
1452 			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
1453 			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
1454 		}
1455 
1456 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1457 			adev->mes.fw[pipe]->data;
1458 
1459 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1460 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1461 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1462 
1463 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1464 				ucode_id, fw_data, fw_size, fw_autoload_mask);
1465 
1466 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1467 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1468 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1469 
1470 		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1471 				data_id, fw_data, fw_size, fw_autoload_mask);
1472 	}
1473 }
1474 
1475 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1476 {
1477 	uint32_t rlc_g_offset, rlc_g_size;
1478 	uint64_t gpu_addr;
1479 	uint32_t autoload_fw_id[2];
1480 
1481 	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);
1482 
1483 	/* RLC autoload sequence 2: copy ucode */
1484 	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
1485 	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
1486 	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
1487 	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);
1488 
1489 	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
1490 	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
1491 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
1492 
1493 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1494 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1495 
1496 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1497 
1498 	/* RLC autoload sequence 3: load IMU fw */
1499 	if (adev->gfx.imu.funcs->load_microcode)
1500 		adev->gfx.imu.funcs->load_microcode(adev);
1501 	/* RLC autoload sequence 4: init IMU fw */
1502 	if (adev->gfx.imu.funcs->setup_imu)
1503 		adev->gfx.imu.funcs->setup_imu(adev);
1504 	if (adev->gfx.imu.funcs->start_imu)
1505 		adev->gfx.imu.funcs->start_imu(adev);
1506 
1507 	/* RLC autoload sequence 5: disable gpa mode */
1508 	gfx_v11_0_disable_gpa_mode(adev);
1509 
1510 	return 0;
1511 }
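
/*
 * Backdoor autoload, in short: once every image is staged (sequence 2),
 * the GFX_IMU_RLC_BOOTLOADER registers are pointed at the RLC_G entry
 * inside the same buffer so the bootloader knows where to fetch the RLC
 * ucode, the IMU is loaded, set up and started (sequences 3/4), and GPA
 * mode is disabled (sequence 5).  gfx_v11_0_wait_for_rlc_autoload_complete()
 * later polls for the resulting BOOTLOAD_COMPLETE.
 */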
1512 
1513 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
1514 {
1515 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
1516 	uint32_t *ptr;
1517 	uint32_t inst;
1518 
1519 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
1520 	if (!ptr) {
1521 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1522 		adev->gfx.ip_dump_core = NULL;
1523 	} else {
1524 		adev->gfx.ip_dump_core = ptr;
1525 	}
1526 
1527 	/* Allocate memory for compute queue registers for all the instances */
1528 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
1529 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1530 		adev->gfx.mec.num_queue_per_pipe;
1531 
1532 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1533 	if (!ptr) {
1534 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1535 		adev->gfx.ip_dump_compute_queues = NULL;
1536 	} else {
1537 		adev->gfx.ip_dump_compute_queues = ptr;
1538 	}
1539 
1540 	/* Allocate memory for gfx queue registers for all the instances */
1541 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
1542 	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
1543 		adev->gfx.me.num_queue_per_pipe;
1544 
1545 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1546 	if (!ptr) {
1547 		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
1548 		adev->gfx.ip_dump_gfx_queues = NULL;
1549 	} else {
1550 		adev->gfx.ip_dump_gfx_queues = ptr;
1551 	}
1552 }
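
/*
 * Sizing example for the dump buffers above (hypothetical GC 11.0.0 style
 * topology from sw_init below): num_mec = 2, num_pipe_per_mec = 4 and
 * num_queue_per_pipe = 4 give 2 * 4 * 4 = 32 compute queue instances, so
 * the compute buffer holds ARRAY_SIZE(gc_cp_reg_list_11) registers for
 * each of those 32 queues; the gfx queue buffer is sized the same way
 * from the ME topology.
 */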
1553 
1554 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
1555 {
1556 	int i, j, k, r, ring_id = 0;
1557 	int xcc_id = 0;
1558 	struct amdgpu_device *adev = ip_block->adev;
1559 
1560 	INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
1561 
1562 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1563 	case IP_VERSION(11, 0, 0):
1564 	case IP_VERSION(11, 0, 2):
1565 	case IP_VERSION(11, 0, 3):
1566 		adev->gfx.me.num_me = 1;
1567 		adev->gfx.me.num_pipe_per_me = 1;
1568 		adev->gfx.me.num_queue_per_pipe = 1;
1569 		adev->gfx.mec.num_mec = 2;
1570 		adev->gfx.mec.num_pipe_per_mec = 4;
1571 		adev->gfx.mec.num_queue_per_pipe = 4;
1572 		break;
1573 	case IP_VERSION(11, 0, 1):
1574 	case IP_VERSION(11, 0, 4):
1575 	case IP_VERSION(11, 5, 0):
1576 	case IP_VERSION(11, 5, 1):
1577 	case IP_VERSION(11, 5, 2):
1578 	case IP_VERSION(11, 5, 3):
1579 		adev->gfx.me.num_me = 1;
1580 		adev->gfx.me.num_pipe_per_me = 1;
1581 		adev->gfx.me.num_queue_per_pipe = 1;
1582 		adev->gfx.mec.num_mec = 1;
1583 		adev->gfx.mec.num_pipe_per_mec = 4;
1584 		adev->gfx.mec.num_queue_per_pipe = 4;
1585 		break;
1586 	default:
1587 		adev->gfx.me.num_me = 1;
1588 		adev->gfx.me.num_pipe_per_me = 1;
1589 		adev->gfx.me.num_queue_per_pipe = 1;
1590 		adev->gfx.mec.num_mec = 1;
1591 		adev->gfx.mec.num_pipe_per_mec = 4;
1592 		adev->gfx.mec.num_queue_per_pipe = 8;
1593 		break;
1594 	}
1595 
1596 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1597 	case IP_VERSION(11, 0, 0):
1598 	case IP_VERSION(11, 0, 2):
1599 	case IP_VERSION(11, 0, 3):
1600 		adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1601 		adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1602 		if (adev->gfx.me_fw_version >= 2280 &&
1603 		    adev->gfx.pfp_fw_version >= 2370 &&
1604 		    adev->gfx.mec_fw_version >= 2450 &&
1605 		    adev->mes.fw_version[0] >= 99) {
1606 			adev->gfx.enable_cleaner_shader = true;
1607 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1608 			if (r) {
1609 				adev->gfx.enable_cleaner_shader = false;
1610 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1611 			}
1612 		}
1613 		break;
1614 	default:
1615 		adev->gfx.enable_cleaner_shader = false;
1616 		break;
1617 	}
1618 
1619 	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
1620 	/* Enable the CG flag in one-VF mode to allow RLC safe mode enter/exit */
1621 	    amdgpu_sriov_is_pp_one_vf(adev))
1622 		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
1623 
1624 	/* EOP Event */
1625 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1626 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1627 			      &adev->gfx.eop_irq);
1628 	if (r)
1629 		return r;
1630 
1631 	/* Bad opcode Event */
1632 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1633 			      GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
1634 			      &adev->gfx.bad_op_irq);
1635 	if (r)
1636 		return r;
1637 
1638 	/* Privileged reg */
1639 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1640 			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1641 			      &adev->gfx.priv_reg_irq);
1642 	if (r)
1643 		return r;
1644 
1645 	/* Privileged inst */
1646 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1647 			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1648 			      &adev->gfx.priv_inst_irq);
1649 	if (r)
1650 		return r;
1651 
1652 	/* FED error */
1653 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1654 				  GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
1655 				  &adev->gfx.rlc_gc_fed_irq);
1656 	if (r)
1657 		return r;
1658 
1659 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1660 
1661 	gfx_v11_0_me_init(adev);
1662 
1663 	r = gfx_v11_0_rlc_init(adev);
1664 	if (r) {
1665 		DRM_ERROR("Failed to init rlc BOs!\n");
1666 		return r;
1667 	}
1668 
1669 	r = gfx_v11_0_mec_init(adev);
1670 	if (r) {
1671 		DRM_ERROR("Failed to init MEC BOs!\n");
1672 		return r;
1673 	}
1674 
1675 	/* set up the gfx ring */
1676 	for (i = 0; i < adev->gfx.me.num_me; i++) {
1677 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
1678 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1679 				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1680 					continue;
1681 
1682 				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
1683 							    i, k, j);
1684 				if (r)
1685 					return r;
1686 				ring_id++;
1687 			}
1688 		}
1689 	}
1690 
1691 	ring_id = 0;
1692 	/* set up the compute queues - allocate horizontally across pipes */
1693 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1694 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1695 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1696 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
1697 								     k, j))
1698 					continue;
1699 
1700 				r = gfx_v11_0_compute_ring_init(adev, ring_id,
1701 								i, k, j);
1702 				if (r)
1703 					return r;
1704 
1705 				ring_id++;
1706 			}
1707 		}
1708 	}
1709 
1710 	adev->gfx.gfx_supported_reset =
1711 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
1712 	adev->gfx.compute_supported_reset =
1713 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1714 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1715 	case IP_VERSION(11, 0, 0):
1716 	case IP_VERSION(11, 0, 2):
1717 	case IP_VERSION(11, 0, 3):
1718 		if ((adev->gfx.me_fw_version >= 2280) &&
1719 		    (adev->gfx.mec_fw_version >= 2410)) {
1720 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1721 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1722 		}
1723 		break;
1724 	default:
1725 		break;
1726 	}
1727 
1728 	if (!adev->enable_mes_kiq) {
1729 		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
1730 		if (r) {
1731 			DRM_ERROR("Failed to init KIQ BOs!\n");
1732 			return r;
1733 		}
1734 
1735 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1736 		if (r)
1737 			return r;
1738 	}
1739 
1740 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
1741 	if (r)
1742 		return r;
1743 
1744 	/* allocate visible FB for rlc auto-loading fw */
1745 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1746 		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1747 		if (r)
1748 			return r;
1749 	}
1750 
1751 	r = gfx_v11_0_gpu_early_init(adev);
1752 	if (r)
1753 		return r;
1754 
1755 	if (amdgpu_gfx_ras_sw_init(adev)) {
1756 		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
1757 		return -EINVAL;
1758 	}
1759 
1760 	gfx_v11_0_alloc_ip_dump(adev);
1761 
1762 	r = amdgpu_gfx_sysfs_init(adev);
1763 	if (r)
1764 		return r;
1765 
1766 	return 0;
1767 }
1768 
1769 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1770 {
1771 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1772 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1773 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1774 
1775 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1776 			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1777 			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1778 }
1779 
1780 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1781 {
1782 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1783 			      &adev->gfx.me.me_fw_gpu_addr,
1784 			      (void **)&adev->gfx.me.me_fw_ptr);
1785 
1786 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1787 			       &adev->gfx.me.me_fw_data_gpu_addr,
1788 			       (void **)&adev->gfx.me.me_fw_data_ptr);
1789 }
1790 
1791 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1792 {
1793 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1794 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1795 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1796 }
1797 
1798 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
1799 {
1800 	int i;
1801 	struct amdgpu_device *adev = ip_block->adev;
1802 
1803 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1804 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1805 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1806 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1807 
1808 	amdgpu_gfx_mqd_sw_fini(adev, 0);
1809 
1810 	if (!adev->enable_mes_kiq) {
1811 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
1812 		amdgpu_gfx_kiq_fini(adev, 0);
1813 	}
1814 
1815 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1816 
1817 	gfx_v11_0_pfp_fini(adev);
1818 	gfx_v11_0_me_fini(adev);
1819 	gfx_v11_0_rlc_fini(adev);
1820 	gfx_v11_0_mec_fini(adev);
1821 
1822 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1823 		gfx_v11_0_rlc_autoload_buffer_fini(adev);
1824 
1825 	gfx_v11_0_free_microcode(adev);
1826 
1827 	amdgpu_gfx_sysfs_fini(adev);
1828 
1829 	kfree(adev->gfx.ip_dump_core);
1830 	kfree(adev->gfx.ip_dump_compute_queues);
1831 	kfree(adev->gfx.ip_dump_gfx_queues);
1832 
1833 	return 0;
1834 }
1835 
1836 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1837 				   u32 sh_num, u32 instance, int xcc_id)
1838 {
1839 	u32 data;
1840 
1841 	if (instance == 0xffffffff)
1842 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1843 				     INSTANCE_BROADCAST_WRITES, 1);
1844 	else
1845 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1846 				     instance);
1847 
1848 	if (se_num == 0xffffffff)
1849 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1850 				     1);
1851 	else
1852 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1853 
1854 	if (sh_num == 0xffffffff)
1855 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1856 				     1);
1857 	else
1858 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1859 
1860 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1861 }
1862 
1863 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
1864 {
1865 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1866 
1867 	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
1868 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1869 					   CC_GC_SA_UNIT_DISABLE,
1870 					   SA_DISABLE);
1871 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
1872 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1873 						 GC_USER_SA_UNIT_DISABLE,
1874 						 SA_DISABLE);
1875 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1876 					    adev->gfx.config.max_shader_engines);
1877 
1878 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1879 }
1880 
1881 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1882 {
1883 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1884 	u32 rb_mask;
1885 
1886 	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1887 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1888 					    CC_RB_BACKEND_DISABLE,
1889 					    BACKEND_DISABLE);
1890 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1891 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1892 						 GC_USER_RB_BACKEND_DISABLE,
1893 						 BACKEND_DISABLE);
1894 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1895 					    adev->gfx.config.max_shader_engines);
1896 
1897 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1898 }
1899 
1900 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
1901 {
1902 	u32 rb_bitmap_per_sa;
1903 	u32 rb_bitmap_width_per_sa;
1904 	u32 max_sa;
1905 	u32 active_sa_bitmap;
1906 	u32 global_active_rb_bitmap;
1907 	u32 active_rb_bitmap = 0;
1908 	u32 i;
1909 
1910 	/* query sa bitmap from SA_UNIT_DISABLE registers */
1911 	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
1912 	/* query rb bitmap from RB_BACKEND_DISABLE registers */
1913 	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
1914 
1915 	/* generate active rb bitmap according to active sa bitmap */
1916 	max_sa = adev->gfx.config.max_shader_engines *
1917 		 adev->gfx.config.max_sh_per_se;
1918 	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1919 				 adev->gfx.config.max_sh_per_se;
1920 	rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
1921 
1922 	for (i = 0; i < max_sa; i++) {
1923 		if (active_sa_bitmap & (1 << i))
1924 			active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
1925 	}
1926 
1927 	active_rb_bitmap &= global_active_rb_bitmap;
1928 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1929 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1930 }
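
/*
 * Worked example for the bitmap math above, assuming a hypothetical
 * config with max_shader_engines = 2, max_sh_per_se = 2 and
 * max_backends_per_se = 4: max_sa = 4, rb_bitmap_width_per_sa = 2 and
 * rb_bitmap_per_sa = 0x3.  If SAs 0, 1 and 3 are active, the loop builds
 * 0x3 | (0x3 << 2) | (0x3 << 6) = 0xcf, which is then ANDed with the RBs
 * the fuses actually leave enabled.
 */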
1931 
1932 #define DEFAULT_SH_MEM_BASES	(0x6000)
1933 #define LDS_APP_BASE           0x1
1934 #define SCRATCH_APP_BASE       0x2
1935 
1936 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
1937 {
1938 	int i;
1939 	uint32_t sh_mem_bases;
1940 	uint32_t data;
1941 
1942 	/*
1943 	 * Configure apertures:
1944 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1945 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1946 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1947 	 */
1948 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1949 			SCRATCH_APP_BASE;
1950 
1951 	mutex_lock(&adev->srbm_mutex);
1952 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1953 		soc21_grbm_select(adev, 0, 0, 0, i);
1954 		/* CP and shaders */
1955 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1956 		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
1957 
1958 		/* Enable trap for each kfd vmid. */
1959 		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
1960 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1961 		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
1962 	}
1963 	soc21_grbm_select(adev, 0, 0, 0, 0);
1964 	mutex_unlock(&adev->srbm_mutex);
1965 
1966 	/*
1967 	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
1968 	 * access. These should be enabled by FW for target VMIDs.
1969 	 */
1970 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1971 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
1972 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
1973 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
1974 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
1975 	}
1976 }
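
/*
 * Assuming the usual SH_MEM_BASES layout (PRIVATE_BASE in the low 16
 * bits, SHARED_BASE in the high 16 bits), the value written above works
 * out to 0x00010002: selector 1 (LDS_APP_BASE) for the shared/LDS
 * aperture and selector 2 (SCRATCH_APP_BASE) for the private/scratch
 * aperture, matching the carve-out described in the comment at the top
 * of this function.
 */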
1977 
1978 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
1979 {
1980 	int vmid;
1981 
1982 	/*
1983 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1984 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1985 	 * the driver can enable them for graphics. VMID0 should maintain
1986 	 * access so that HWS firmware can save/restore entries.
1987 	 */
1988 	for (vmid = 1; vmid < 16; vmid++) {
1989 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
1990 		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
1991 		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
1992 		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
1993 	}
1994 }
1995 
1996 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
1997 {
1998 	/* TODO: harvest feature to be added later. */
1999 }
2000 
2001 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
2002 {
2003 	/* TCCs are global (not instanced). */
2004 	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
2005 			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
2006 
2007 	adev->gfx.config.tcc_disabled_mask =
2008 		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
2009 		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
2010 }
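
/*
 * The two reads above are ORed so a TCC counts as disabled if either the
 * hard setting (CGTS_TCC_DISABLE) or the user setting
 * (CGTS_USER_TCC_DISABLE) disables it; TCC_DISABLE supplies the low 16
 * bits of tcc_disabled_mask and HI_TCC_DISABLE the upper 16.
 */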
2011 
2012 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
2013 {
2014 	u32 tmp;
2015 	int i;
2016 
2017 	if (!amdgpu_sriov_vf(adev))
2018 		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2019 
2020 	gfx_v11_0_setup_rb(adev);
2021 	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
2022 	gfx_v11_0_get_tcc_info(adev);
2023 	adev->gfx.config.pa_sc_tile_steering_override = 0;
2024 
2025 	/* Record whether texture coordinate truncation is conformant. */
2026 	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
2027 	adev->gfx.config.ta_cntl2_truncate_coord_mode =
2028 		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
2029 
2030 	/* XXX SH_MEM regs */
2031 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2032 	mutex_lock(&adev->srbm_mutex);
2033 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2034 		soc21_grbm_select(adev, 0, 0, 0, i);
2035 		/* CP and shaders */
2036 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2037 		if (i != 0) {
2038 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2039 				(adev->gmc.private_aperture_start >> 48));
2040 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2041 				(adev->gmc.shared_aperture_start >> 48));
2042 			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
2043 		}
2044 	}
2045 	soc21_grbm_select(adev, 0, 0, 0, 0);
2046 
2047 	mutex_unlock(&adev->srbm_mutex);
2048 
2049 	gfx_v11_0_init_compute_vmid(adev);
2050 	gfx_v11_0_init_gds_vmid(adev);
2051 }
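
/*
 * Per-VMID aperture programming above: both SH_MEM_BASES fields are
 * loaded with bits 63:48 of the corresponding aperture start address
 * (hence the >> 48), while VMID 0 is skipped and left with its existing
 * bases.
 */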
2052 
2053 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
2054 				      int me, int pipe)
2055 {
2056 	if (me != 0)
2057 		return 0;
2058 
2059 	switch (pipe) {
2060 	case 0:
2061 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
2062 	case 1:
2063 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
2064 	default:
2065 		return 0;
2066 	}
2067 }
2068 
2069 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
2070 				      int me, int pipe)
2071 {
2072 	/*
2073 	 * amdgpu controls only the first MEC. That's why this function only
2074 	 * handles the setting of interrupts for this specific MEC. All other
2075 	 * pipes' interrupts are set by amdkfd.
2076 	 */
2077 	if (me != 1)
2078 		return 0;
2079 
2080 	switch (pipe) {
2081 	case 0:
2082 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
2083 	case 1:
2084 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
2085 	case 2:
2086 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
2087 	case 3:
2088 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
2089 	default:
2090 		return 0;
2091 	}
2092 }
2093 
2094 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2095 					       bool enable)
2096 {
2097 	u32 tmp, cp_int_cntl_reg;
2098 	int i, j;
2099 
2100 	if (amdgpu_sriov_vf(adev))
2101 		return;
2102 
2103 	for (i = 0; i < adev->gfx.me.num_me; i++) {
2104 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
2105 			cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
2106 
2107 			if (cp_int_cntl_reg) {
2108 				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
2109 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
2110 						    enable ? 1 : 0);
2111 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
2112 						    enable ? 1 : 0);
2113 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
2114 						    enable ? 1 : 0);
2115 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
2116 						    enable ? 1 : 0);
2117 				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
2118 			}
2119 		}
2120 	}
2121 }
2122 
2123 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
2124 {
2125 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2126 
2127 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
2128 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2129 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
2130 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2131 	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
2132 
2133 	return 0;
2134 }
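
/*
 * The clear-state buffer address is split across the CSIB registers: the
 * upper 32 bits go to ADDR_HI and the low dword is masked with
 * 0xfffffffc, so the buffer is assumed to be at least dword aligned.
 */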
2135 
2136 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
2137 {
2138 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
2139 
2140 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2141 	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
2142 }
2143 
2144 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
2145 {
2146 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2147 	udelay(50);
2148 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2149 	udelay(50);
2150 }
2151 
2152 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
2153 					     bool enable)
2154 {
2155 	uint32_t rlc_pg_cntl;
2156 
2157 	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
2158 
2159 	if (!enable) {
2160 		/* RLC_PG_CNTL[23] = 0 (default)
2161 		 * RLC will wait for handshake acks with SMU
2162 		 * GFXOFF will be enabled
2163 		 * RLC_PG_CNTL[23] = 1
2164 		 * RLC will not issue any message to SMU
2165 		 * hence no handshake between SMU & RLC
2166 		 * GFXOFF will be disabled
2167 		 */
2168 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2169 	} else
2170 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2171 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
2172 }
2173 
2174 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
2175 {
2176 	/* TODO: enable the RLC & SMU handshake once the SMU
2177 	 * and GFXOFF features work as expected */
2178 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
2179 		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
2180 
2181 	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2182 	udelay(50);
2183 }
2184 
2185 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
2186 {
2187 	uint32_t tmp;
2188 
2189 	/* enable Save Restore Machine */
2190 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
2191 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2192 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
2193 	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
2194 }
2195 
2196 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
2197 {
2198 	const struct rlc_firmware_header_v2_0 *hdr;
2199 	const __le32 *fw_data;
2200 	unsigned i, fw_size;
2201 
2202 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2203 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2204 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2205 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2206 
2207 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
2208 		     RLCG_UCODE_LOADING_START_ADDRESS);
2209 
2210 	for (i = 0; i < fw_size; i++)
2211 		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
2212 			     le32_to_cpup(fw_data++));
2213 
2214 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2215 }
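
/*
 * Direct (non-PSP) RLCG load: the GPM ucode is streamed one dword at a
 * time through the UCODE_ADDR/UCODE_DATA pair starting at
 * RLCG_UCODE_LOADING_START_ADDRESS (the address register advances on
 * each data write), and the final write of rlc_fw_version into
 * UCODE_ADDR follows the convention used by the other direct-load paths
 * in this file.
 */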
2216 
2217 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2218 {
2219 	const struct rlc_firmware_header_v2_2 *hdr;
2220 	const __le32 *fw_data;
2221 	unsigned i, fw_size;
2222 	u32 tmp;
2223 
2224 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2225 
2226 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2227 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2228 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2229 
2230 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2231 
2232 	for (i = 0; i < fw_size; i++) {
2233 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2234 			msleep(1);
2235 		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2236 				le32_to_cpup(fw_data++));
2237 	}
2238 
2239 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2240 
2241 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2242 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2243 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2244 
2245 	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2246 	for (i = 0; i < fw_size; i++) {
2247 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2248 			msleep(1);
2249 		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2250 				le32_to_cpup(fw_data++));
2251 	}
2252 
2253 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2254 
2255 	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2256 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2257 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2258 	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2259 }
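
/*
 * The v2.2 header adds separate IRAM and DRAM images for the RLC LX6
 * core; both are streamed through their ADDR/DATA pairs above (throttled
 * every 100 dwords when running in emulation mode), after which the LX6
 * is released from reset via RLC_LX6_CNTL.BRESET = 0 with PDEBUG
 * enabled.
 */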
2260 
2261 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
2262 {
2263 	const struct rlc_firmware_header_v2_3 *hdr;
2264 	const __le32 *fw_data;
2265 	unsigned i, fw_size;
2266 	u32 tmp;
2267 
2268 	hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
2269 
2270 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2271 			le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
2272 	fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
2273 
2274 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
2275 
2276 	for (i = 0; i < fw_size; i++) {
2277 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2278 			msleep(1);
2279 		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
2280 				le32_to_cpup(fw_data++));
2281 	}
2282 
2283 	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
2284 
2285 	tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
2286 	tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
2287 	WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
2288 
2289 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2290 			le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
2291 	fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
2292 
2293 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
2294 
2295 	for (i = 0; i < fw_size; i++) {
2296 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2297 			msleep(1);
2298 		WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
2299 				le32_to_cpup(fw_data++));
2300 	}
2301 
2302 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
2303 
2304 	tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
2305 	tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
2306 	WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
2307 }
2308 
2309 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
2310 {
2311 	const struct rlc_firmware_header_v2_0 *hdr;
2312 	uint16_t version_major;
2313 	uint16_t version_minor;
2314 
2315 	if (!adev->gfx.rlc_fw)
2316 		return -EINVAL;
2317 
2318 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2319 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2320 
2321 	version_major = le16_to_cpu(hdr->header.header_version_major);
2322 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
2323 
2324 	if (version_major == 2) {
2325 		gfx_v11_0_load_rlcg_microcode(adev);
2326 		if (amdgpu_dpm == 1) {
2327 			if (version_minor >= 2)
2328 				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
2329 			if (version_minor == 3)
2330 				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2331 		}
2332 
2333 		return 0;
2334 	}
2335 
2336 	return -EINVAL;
2337 }
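
/*
 * Version gating for the direct RLC load: a v2 header always provides
 * the RLCG image; header minor >= 2 adds the LX6 IRAM/DRAM images and
 * minor == 3 adds the RLCP/RLCV images, and both extras are only loaded
 * when dpm is enabled (amdgpu_dpm == 1).
 */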
2338 
2339 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2340 {
2341 	int r;
2342 
2343 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2344 		gfx_v11_0_init_csb(adev);
2345 
2346 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2347 			gfx_v11_0_rlc_enable_srm(adev);
2348 	} else {
2349 		if (amdgpu_sriov_vf(adev)) {
2350 			gfx_v11_0_init_csb(adev);
2351 			return 0;
2352 		}
2353 
2354 		adev->gfx.rlc.funcs->stop(adev);
2355 
2356 		/* disable CG */
2357 		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2358 
2359 		/* disable PG */
2360 		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2361 
2362 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2363 			/* legacy rlc firmware loading */
2364 			r = gfx_v11_0_rlc_load_microcode(adev);
2365 			if (r)
2366 				return r;
2367 		}
2368 
2369 		gfx_v11_0_init_csb(adev);
2370 
2371 		adev->gfx.rlc.funcs->start(adev);
2372 	}
2373 	return 0;
2374 }
2375 
2376 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2377 {
2378 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2379 	uint32_t tmp;
2380 	int i;
2381 
2382 	/* Trigger an invalidation of the L1 instruction caches */
2383 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2384 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2385 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2386 
2387 	/* Wait for invalidation complete */
2388 	for (i = 0; i < usec_timeout; i++) {
2389 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2390 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2391 					INVALIDATE_CACHE_COMPLETE))
2392 			break;
2393 		udelay(1);
2394 	}
2395 
2396 	if (i >= usec_timeout) {
2397 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2398 		return -EINVAL;
2399 	}
2400 
2401 	if (amdgpu_emu_mode == 1)
2402 		adev->hdp.funcs->flush_hdp(adev, NULL);
2403 
2404 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2405 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2406 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2407 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2408 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2409 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2410 
2411 	/* Program me ucode address into instruction cache address register */
2412 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2413 			lower_32_bits(addr) & 0xFFFFF000);
2414 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2415 			upper_32_bits(addr));
2416 
2417 	return 0;
2418 }
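
/*
 * Legacy (non-RS64) ME cache setup above: invalidate the L1 instruction
 * cache, wait for INVALIDATE_CACHE_COMPLETE, then point CP_ME_IC_BASE at
 * the staged ucode; the low register masks off the bottom 12 bits, so
 * the staged image is assumed to be 4K aligned.
 */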
2419 
2420 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2421 {
2422 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2423 	uint32_t tmp;
2424 	int i;
2425 
2426 	/* Trigger an invalidation of the L1 instruction caches */
2427 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2428 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2429 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2430 
2431 	/* Wait for invalidation complete */
2432 	for (i = 0; i < usec_timeout; i++) {
2433 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2434 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2435 					INVALIDATE_CACHE_COMPLETE))
2436 			break;
2437 		udelay(1);
2438 	}
2439 
2440 	if (i >= usec_timeout) {
2441 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2442 		return -EINVAL;
2443 	}
2444 
2445 	if (amdgpu_emu_mode == 1)
2446 		adev->hdp.funcs->flush_hdp(adev, NULL);
2447 
2448 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2449 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2450 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2451 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2452 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2453 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2454 
2455 	/* Program pfp ucode address into instruction cache address register */
2456 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2457 			lower_32_bits(addr) & 0xFFFFF000);
2458 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2459 			upper_32_bits(addr));
2460 
2461 	return 0;
2462 }
2463 
2464 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2465 {
2466 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2467 	uint32_t tmp;
2468 	int i;
2469 
2470 	/* Trigger an invalidation of the L1 instruction caches */
2471 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2472 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2473 
2474 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2475 
2476 	/* Wait for invalidation complete */
2477 	for (i = 0; i < usec_timeout; i++) {
2478 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2479 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2480 					INVALIDATE_CACHE_COMPLETE))
2481 			break;
2482 		udelay(1);
2483 	}
2484 
2485 	if (i >= usec_timeout) {
2486 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2487 		return -EINVAL;
2488 	}
2489 
2490 	if (amdgpu_emu_mode == 1)
2491 		adev->hdp.funcs->flush_hdp(adev, NULL);
2492 
2493 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2494 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2495 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2496 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2497 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2498 
2499 	/* Program mec1 ucode address into instruction cache address register */
2500 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2501 			lower_32_bits(addr) & 0xFFFFF000);
2502 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2503 			upper_32_bits(addr));
2504 
2505 	return 0;
2506 }
2507 
2508 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2509 {
2510 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2511 	uint32_t tmp;
2512 	unsigned i, pipe_id;
2513 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2514 
2515 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2516 		adev->gfx.pfp_fw->data;
2517 
2518 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2519 		lower_32_bits(addr));
2520 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2521 		upper_32_bits(addr));
2522 
2523 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2524 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2525 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2526 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2527 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2528 
2529 	/*
2530 	 * Programming any of the CP_PFP_IC_BASE registers
2531 	 * forces invalidation of the PFP L1 I$. Wait for the
2532 	 * invalidation to complete.
2533 	 */
2534 	for (i = 0; i < usec_timeout; i++) {
2535 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2536 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2537 			INVALIDATE_CACHE_COMPLETE))
2538 			break;
2539 		udelay(1);
2540 	}
2541 
2542 	if (i >= usec_timeout) {
2543 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2544 		return -EINVAL;
2545 	}
2546 
2547 	/* Prime the L1 instruction caches */
2548 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2549 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2550 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2551 	/* Wait for the cache to be primed */
2552 	for (i = 0; i < usec_timeout; i++) {
2553 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2554 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2555 			ICACHE_PRIMED))
2556 			break;
2557 		udelay(1);
2558 	}
2559 
2560 	if (i >= usec_timeout) {
2561 		dev_err(adev->dev, "failed to prime instruction cache\n");
2562 		return -EINVAL;
2563 	}
2564 
2565 	mutex_lock(&adev->srbm_mutex);
2566 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2567 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2568 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2569 			(pfp_hdr->ucode_start_addr_hi << 30) |
2570 			(pfp_hdr->ucode_start_addr_lo >> 2));
2571 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2572 			pfp_hdr->ucode_start_addr_hi >> 2);
2573 
2574 		/*
2575 		 * Program CP_ME_CNTL to reset given PIPE to take
2576 		 * effect of CP_PFP_PRGRM_CNTR_START.
2577 		 */
2578 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2579 		if (pipe_id == 0)
2580 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2581 					PFP_PIPE0_RESET, 1);
2582 		else
2583 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2584 					PFP_PIPE1_RESET, 1);
2585 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2586 
2587 		/* Clear the pfp pipe reset bit. */
2588 		if (pipe_id == 0)
2589 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2590 					PFP_PIPE0_RESET, 0);
2591 		else
2592 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2593 					PFP_PIPE1_RESET, 0);
2594 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2595 
2596 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2597 			lower_32_bits(addr2));
2598 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2599 			upper_32_bits(addr2));
2600 	}
2601 	soc21_grbm_select(adev, 0, 0, 0, 0);
2602 	mutex_unlock(&adev->srbm_mutex);
2603 
2604 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2605 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2606 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2607 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2608 
2609 	/* Invalidate the data caches */
2610 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2611 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2612 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2613 
2614 	for (i = 0; i < usec_timeout; i++) {
2615 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2616 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2617 			INVALIDATE_DCACHE_COMPLETE))
2618 			break;
2619 		udelay(1);
2620 	}
2621 
2622 	if (i >= usec_timeout) {
2623 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2624 		return -EINVAL;
2625 	}
2626 
2627 	return 0;
2628 }
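
/*
 * RS64 PFP bring-up above, per pipe: PRGRM_CNTR_START(_HI) is loaded
 * with the dword-granular start address (the 32-bit write
 * (hi << 30) | (lo >> 2) holds the low half of byte_addr / 4 and
 * hi >> 2 the upper half), the pipe reset in CP_ME_CNTL is pulsed so the
 * new start address takes effect, and each pipe is handed its data-cache
 * base before the RS64 data cache is invalidated.
 */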
2629 
2630 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2631 {
2632 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2633 	uint32_t tmp;
2634 	unsigned i, pipe_id;
2635 	const struct gfx_firmware_header_v2_0 *me_hdr;
2636 
2637 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2638 		adev->gfx.me_fw->data;
2639 
2640 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2641 		lower_32_bits(addr));
2642 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2643 		upper_32_bits(addr));
2644 
2645 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2646 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2647 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2648 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2649 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2650 
2651 	/*
2652 	 * Programming any of the CP_ME_IC_BASE registers
2653 	 * forces invalidation of the ME L1 I$. Wait for the
2654 	 * invalidation to complete.
2655 	 */
2656 	for (i = 0; i < usec_timeout; i++) {
2657 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2658 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2659 			INVALIDATE_CACHE_COMPLETE))
2660 			break;
2661 		udelay(1);
2662 	}
2663 
2664 	if (i >= usec_timeout) {
2665 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2666 		return -EINVAL;
2667 	}
2668 
2669 	/* Prime the instruction caches */
2670 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2671 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2672 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2673 
2674 	/* Wait for the instruction cache to be primed */
2675 	for (i = 0; i < usec_timeout; i++) {
2676 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2677 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2678 			ICACHE_PRIMED))
2679 			break;
2680 		udelay(1);
2681 	}
2682 
2683 	if (i >= usec_timeout) {
2684 		dev_err(adev->dev, "failed to prime instruction cache\n");
2685 		return -EINVAL;
2686 	}
2687 
2688 	mutex_lock(&adev->srbm_mutex);
2689 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2690 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2691 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2692 			(me_hdr->ucode_start_addr_hi << 30) |
2693 			(me_hdr->ucode_start_addr_lo >> 2));
2694 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2695 			me_hdr->ucode_start_addr_hi >> 2);
2696 
2697 		/*
2698 		 * Program CP_ME_CNTL to reset given PIPE to take
2699 		 * effect of CP_ME_PRGRM_CNTR_START.
2700 		 */
2701 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2702 		if (pipe_id == 0)
2703 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2704 					ME_PIPE0_RESET, 1);
2705 		else
2706 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2707 					ME_PIPE1_RESET, 1);
2708 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2709 
2710 		/* Clear the me pipe reset bit. */
2711 		if (pipe_id == 0)
2712 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2713 					ME_PIPE0_RESET, 0);
2714 		else
2715 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2716 					ME_PIPE1_RESET, 0);
2717 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2718 
2719 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2720 			lower_32_bits(addr2));
2721 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2722 			upper_32_bits(addr2));
2723 	}
2724 	soc21_grbm_select(adev, 0, 0, 0, 0);
2725 	mutex_unlock(&adev->srbm_mutex);
2726 
2727 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2728 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2729 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2730 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2731 
2732 	/* Invalidate the data caches */
2733 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2734 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2735 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2736 
2737 	for (i = 0; i < usec_timeout; i++) {
2738 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2739 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2740 			INVALIDATE_DCACHE_COMPLETE))
2741 			break;
2742 		udelay(1);
2743 	}
2744 
2745 	if (i >= usec_timeout) {
2746 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2747 		return -EINVAL;
2748 	}
2749 
2750 	return 0;
2751 }
2752 
2753 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2754 {
2755 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2756 	uint32_t tmp;
2757 	unsigned i;
2758 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2759 
2760 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2761 		adev->gfx.mec_fw->data;
2762 
2763 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2764 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2765 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2766 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2767 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2768 
2769 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2770 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2771 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2772 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2773 
2774 	mutex_lock(&adev->srbm_mutex);
2775 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2776 		soc21_grbm_select(adev, 1, i, 0, 0);
2777 
2778 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2779 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2780 		     upper_32_bits(addr2));
2781 
2782 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2783 					mec_hdr->ucode_start_addr_lo >> 2 |
2784 					mec_hdr->ucode_start_addr_hi << 30);
2785 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2786 					mec_hdr->ucode_start_addr_hi >> 2);
2787 
2788 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2789 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2790 		     upper_32_bits(addr));
2791 	}
2792 	mutex_unlock(&adev->srbm_mutex);
2793 	soc21_grbm_select(adev, 0, 0, 0, 0);
2794 
2795 	/* Trigger an invalidation of the MEC RS64 data cache */
2796 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2797 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2798 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2799 
2800 	/* Wait for invalidation complete */
2801 	for (i = 0; i < usec_timeout; i++) {
2802 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2803 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2804 				       INVALIDATE_DCACHE_COMPLETE))
2805 			break;
2806 		udelay(1);
2807 	}
2808 
2809 	if (i >= usec_timeout) {
2810 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2811 		return -EINVAL;
2812 	}
2813 
2814 	/* Trigger an invalidation of the L1 instruction caches */
2815 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2816 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2817 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2818 
2819 	/* Wait for invalidation complete */
2820 	for (i = 0; i < usec_timeout; i++) {
2821 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2822 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2823 				       INVALIDATE_CACHE_COMPLETE))
2824 			break;
2825 		udelay(1);
2826 	}
2827 
2828 	if (i >= usec_timeout) {
2829 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2830 		return -EINVAL;
2831 	}
2832 
2833 	return 0;
2834 }
2835 
2836 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2837 {
2838 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2839 	const struct gfx_firmware_header_v2_0 *me_hdr;
2840 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2841 	uint32_t pipe_id, tmp;
2842 
2843 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2844 		adev->gfx.mec_fw->data;
2845 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2846 		adev->gfx.me_fw->data;
2847 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2848 		adev->gfx.pfp_fw->data;
2849 
2850 	/* config pfp program start addr */
2851 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2852 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2853 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2854 			(pfp_hdr->ucode_start_addr_hi << 30) |
2855 			(pfp_hdr->ucode_start_addr_lo >> 2));
2856 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2857 			pfp_hdr->ucode_start_addr_hi >> 2);
2858 	}
2859 	soc21_grbm_select(adev, 0, 0, 0, 0);
2860 
2861 	/* reset pfp pipe */
2862 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2863 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2864 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2865 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2866 
2867 	/* clear pfp pipe reset */
2868 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2869 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2870 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2871 
2872 	/* config me program start addr */
2873 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2874 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2875 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2876 			(me_hdr->ucode_start_addr_hi << 30) |
2877 			(me_hdr->ucode_start_addr_lo >> 2));
2878 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2879 			me_hdr->ucode_start_addr_hi >> 2);
2880 	}
2881 	soc21_grbm_select(adev, 0, 0, 0, 0);
2882 
2883 	/* reset me pipe */
2884 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2885 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2886 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2887 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2888 
2889 	/* clear me pipe reset */
2890 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2891 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2892 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2893 
2894 	/* config mec program start addr */
2895 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2896 		soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2897 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2898 					mec_hdr->ucode_start_addr_lo >> 2 |
2899 					mec_hdr->ucode_start_addr_hi << 30);
2900 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2901 					mec_hdr->ucode_start_addr_hi >> 2);
2902 	}
2903 	soc21_grbm_select(adev, 0, 0, 0, 0);
2904 
2905 	/* reset mec pipe */
2906 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2907 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2908 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2909 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2910 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2911 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2912 
2913 	/* clear mec pipe reset */
2914 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2915 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2916 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2917 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2918 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2919 }
2920 
2921 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2922 {
2923 	uint32_t cp_status;
2924 	uint32_t bootload_status;
2925 	int i, r;
2926 	uint64_t addr, addr2;
2927 
2928 	for (i = 0; i < adev->usec_timeout; i++) {
2929 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2930 
2931 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
2932 			    IP_VERSION(11, 0, 1) ||
2933 		    amdgpu_ip_version(adev, GC_HWIP, 0) ==
2934 			    IP_VERSION(11, 0, 4) ||
2935 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
2936 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
2937 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2) ||
2938 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 3))
2939 			bootload_status = RREG32_SOC15(GC, 0,
2940 					regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
2941 		else
2942 			bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2943 
2944 		if ((cp_status == 0) &&
2945 		    (REG_GET_FIELD(bootload_status,
2946 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2947 			break;
2948 		}
2949 		udelay(1);
2950 	}
2951 
2952 	if (i >= adev->usec_timeout) {
2953 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2954 		return -ETIMEDOUT;
2955 	}
2956 
2957 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2958 		if (adev->gfx.rs64_enable) {
2959 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2960 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2961 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2962 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2963 			r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2964 			if (r)
2965 				return r;
2966 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2967 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2968 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2969 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2970 			r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2971 			if (r)
2972 				return r;
2973 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2974 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2975 			addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2976 				rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2977 			r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2978 			if (r)
2979 				return r;
2980 		} else {
2981 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2982 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2983 			r = gfx_v11_0_config_me_cache(adev, addr);
2984 			if (r)
2985 				return r;
2986 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2987 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2988 			r = gfx_v11_0_config_pfp_cache(adev, addr);
2989 			if (r)
2990 				return r;
2991 			addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2992 				rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2993 			r = gfx_v11_0_config_mec_cache(adev, addr);
2994 			if (r)
2995 				return r;
2996 		}
2997 	}
2998 
2999 	return 0;
3000 }
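
/*
 * Once the RLC reports BOOTLOAD_COMPLETE with CP_STAT idle, the backdoor
 * path still has to point the CP front ends at the images staged in the
 * autoload buffer: RS64 parts get both instruction and stack/data
 * addresses per engine, while the legacy F32 parts only need the
 * instruction-cache base.
 */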
3001 
3002 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3003 {
3004 	int i;
3005 	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3006 
3007 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3008 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3009 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3010 
3011 	for (i = 0; i < adev->usec_timeout; i++) {
3012 		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
3013 			break;
3014 		udelay(1);
3015 	}
3016 
3017 	if (i >= adev->usec_timeout)
3018 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
3019 
3020 	return 0;
3021 }
3022 
3023 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
3024 {
3025 	int r;
3026 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3027 	const __le32 *fw_data;
3028 	unsigned i, fw_size;
3029 
3030 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3031 		adev->gfx.pfp_fw->data;
3032 
3033 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3034 
3035 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3036 		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3037 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
3038 
3039 	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
3040 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3041 				      &adev->gfx.pfp.pfp_fw_obj,
3042 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3043 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3044 	if (r) {
3045 		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
3046 		gfx_v11_0_pfp_fini(adev);
3047 		return r;
3048 	}
3049 
3050 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
3051 
3052 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3053 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3054 
3055 	gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
3056 
3057 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
3058 
3059 	for (i = 0; i < pfp_hdr->jt_size; i++)
3060 		WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
3061 			     le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
3062 
3063 	WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3064 
3065 	return 0;
3066 }
3067 
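/*
 * RS64 PFP microcode load: the v2 firmware image carries separate
 * instruction and data segments.  Each goes into its own 64KB-aligned
 * buffer; the instruction cache is invalidated and primed, the
 * per-pipe program counter start is programmed, and the RS64 data
 * cache is configured and invalidated.
 */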
3068 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
3069 {
3070 	int r;
3071 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
3072 	const __le32 *fw_ucode, *fw_data;
3073 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3074 	uint32_t tmp;
3075 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3076 
3077 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
3078 		adev->gfx.pfp_fw->data;
3079 
3080 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3081 
3082 	/* instruction */
3083 	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
3084 		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
3085 	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
3086 	/* data */
3087 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3088 		le32_to_cpu(pfp_hdr->data_offset_bytes));
3089 	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
3090 
3091 	/* 64KB alignment */
3092 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3093 				      64 * 1024,
3094 				      AMDGPU_GEM_DOMAIN_VRAM |
3095 				      AMDGPU_GEM_DOMAIN_GTT,
3096 				      &adev->gfx.pfp.pfp_fw_obj,
3097 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
3098 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
3099 	if (r) {
3100 		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
3101 		gfx_v11_0_pfp_fini(adev);
3102 		return r;
3103 	}
3104 
3105 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3106 				      64 * 1024,
3107 				      AMDGPU_GEM_DOMAIN_VRAM |
3108 				      AMDGPU_GEM_DOMAIN_GTT,
3109 				      &adev->gfx.pfp.pfp_fw_data_obj,
3110 				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
3111 				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
3112 	if (r) {
3113 		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
3114 		gfx_v11_0_pfp_fini(adev);
3115 		return r;
3116 	}
3117 
3118 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
3119 	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
3120 
3121 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3122 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
3123 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3124 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
3125 
3126 	if (amdgpu_emu_mode == 1)
3127 		adev->hdp.funcs->flush_hdp(adev, NULL);
3128 
3129 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
3130 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3131 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
3132 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3133 
3134 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
3135 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
3136 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
3137 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
3138 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
3139 
3140 	/*
3141 	 * Programming any of the CP_PFP_IC_BASE registers
3142 	 * forces invalidation of the PFP L1 I$. Wait for the
3143 	 * invalidation to complete.
3144 	 */
3145 	for (i = 0; i < usec_timeout; i++) {
3146 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3147 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3148 			INVALIDATE_CACHE_COMPLETE))
3149 			break;
3150 		udelay(1);
3151 	}
3152 
3153 	if (i >= usec_timeout) {
3154 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3155 		return -EINVAL;
3156 	}
3157 
3158 	/* Prime the L1 instruction caches */
3159 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3160 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
3161 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
3162 	/* Wait for the cache to be primed */
3163 	for (i = 0; i < usec_timeout; i++) {
3164 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3165 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3166 			ICACHE_PRIMED))
3167 			break;
3168 		udelay(1);
3169 	}
3170 
3171 	if (i >= usec_timeout) {
3172 		dev_err(adev->dev, "failed to prime instruction cache\n");
3173 		return -EINVAL;
3174 	}
3175 
3176 	mutex_lock(&adev->srbm_mutex);
3177 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3178 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3179 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
3180 			(pfp_hdr->ucode_start_addr_hi << 30) |
3181 			(pfp_hdr->ucode_start_addr_lo >> 2));
3182 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
3183 			pfp_hdr->ucode_start_addr_hi >> 2);
3184 
3185 		/*
3186 		 * Program CP_ME_CNTL to reset the given pipe so that
3187 		 * CP_PFP_PRGRM_CNTR_START takes effect.
3188 		 */
3189 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3190 		if (pipe_id == 0)
3191 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3192 					PFP_PIPE0_RESET, 1);
3193 		else
3194 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3195 					PFP_PIPE1_RESET, 1);
3196 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3197 
3198 		/* Clear the pfp pipe reset bit. */
3199 		if (pipe_id == 0)
3200 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3201 					PFP_PIPE0_RESET, 0);
3202 		else
3203 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3204 					PFP_PIPE1_RESET, 0);
3205 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3206 
3207 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
3208 			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3209 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
3210 			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3211 	}
3212 	soc21_grbm_select(adev, 0, 0, 0, 0);
3213 	mutex_unlock(&adev->srbm_mutex);
3214 
3215 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3216 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3217 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3218 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3219 
3220 	/* Invalidate the data caches */
3221 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3222 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3223 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3224 
3225 	for (i = 0; i < usec_timeout; i++) {
3226 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3227 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3228 			INVALIDATE_DCACHE_COMPLETE))
3229 			break;
3230 		udelay(1);
3231 	}
3232 
3233 	if (i >= usec_timeout) {
3234 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3235 		return -EINVAL;
3236 	}
3237 
3238 	return 0;
3239 }
3240 
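/* Legacy (non-RS64) ME microcode load; mirrors the PFP path above. */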
3241 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
3242 {
3243 	int r;
3244 	const struct gfx_firmware_header_v1_0 *me_hdr;
3245 	const __le32 *fw_data;
3246 	unsigned i, fw_size;
3247 
3248 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3249 		adev->gfx.me_fw->data;
3250 
3251 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3252 
3253 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3254 		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3255 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
3256 
3257 	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
3258 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3259 				      &adev->gfx.me.me_fw_obj,
3260 				      &adev->gfx.me.me_fw_gpu_addr,
3261 				      (void **)&adev->gfx.me.me_fw_ptr);
3262 	if (r) {
3263 		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
3264 		gfx_v11_0_me_fini(adev);
3265 		return r;
3266 	}
3267 
3268 	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
3269 
3270 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3271 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3272 
3273 	gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
3274 
3275 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
3276 
3277 	for (i = 0; i < me_hdr->jt_size; i++)
3278 		WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
3279 			     le32_to_cpup(fw_data + me_hdr->jt_offset + i));
3280 
3281 	WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
3282 
3283 	return 0;
3284 }
3285 
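/* RS64 ME microcode load; same sequence as the RS64 PFP path above. */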
3286 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
3287 {
3288 	int r;
3289 	const struct gfx_firmware_header_v2_0 *me_hdr;
3290 	const __le32 *fw_ucode, *fw_data;
3291 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3292 	uint32_t tmp;
3293 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
3294 
3295 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
3296 		adev->gfx.me_fw->data;
3297 
3298 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3299 
3300 	/* instruction */
3301 	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
3302 		le32_to_cpu(me_hdr->ucode_offset_bytes));
3303 	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
3304 	/* data */
3305 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3306 		le32_to_cpu(me_hdr->data_offset_bytes));
3307 	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
3308 
3309 	/* 64KB alignment */
3310 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3311 				      64 * 1024,
3312 				      AMDGPU_GEM_DOMAIN_VRAM |
3313 				      AMDGPU_GEM_DOMAIN_GTT,
3314 				      &adev->gfx.me.me_fw_obj,
3315 				      &adev->gfx.me.me_fw_gpu_addr,
3316 				      (void **)&adev->gfx.me.me_fw_ptr);
3317 	if (r) {
3318 		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
3319 		gfx_v11_0_me_fini(adev);
3320 		return r;
3321 	}
3322 
3323 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3324 				      64 * 1024,
3325 				      AMDGPU_GEM_DOMAIN_VRAM |
3326 				      AMDGPU_GEM_DOMAIN_GTT,
3327 				      &adev->gfx.me.me_fw_data_obj,
3328 				      &adev->gfx.me.me_fw_data_gpu_addr,
3329 				      (void **)&adev->gfx.me.me_fw_data_ptr);
3330 	if (r) {
3331 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
3332 		gfx_v11_0_me_fini(adev);
3333 		return r;
3334 	}
3335 
3336 	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
3337 	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
3338 
3339 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3340 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3341 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3342 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3343 
3344 	if (amdgpu_emu_mode == 1)
3345 		adev->hdp.funcs->flush_hdp(adev, NULL);
3346 
3347 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3348 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3349 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3350 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3351 
3352 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3353 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3354 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3355 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3356 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3357 
3358 	/*
3359 	 * Programming any of the CP_ME_IC_BASE registers
3360 	 * forces invalidation of the ME L1 I$. Wait for the
3361 	 * invalidation to complete.
3362 	 */
3363 	for (i = 0; i < usec_timeout; i++) {
3364 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3365 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3366 			INVALIDATE_CACHE_COMPLETE))
3367 			break;
3368 		udelay(1);
3369 	}
3370 
3371 	if (i >= usec_timeout) {
3372 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3373 		return -EINVAL;
3374 	}
3375 
3376 	/* Prime the instruction caches */
3377 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3378 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3379 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3380 
3381 	/* Wait for the instruction cache to be primed */
3382 	for (i = 0; i < usec_timeout; i++) {
3383 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3384 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3385 			ICACHE_PRIMED))
3386 			break;
3387 		udelay(1);
3388 	}
3389 
3390 	if (i >= usec_timeout) {
3391 		dev_err(adev->dev, "failed to prime instruction cache\n");
3392 		return -EINVAL;
3393 	}
3394 
3395 	mutex_lock(&adev->srbm_mutex);
3396 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3397 		soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3398 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3399 			(me_hdr->ucode_start_addr_hi << 30) |
3400 			(me_hdr->ucode_start_addr_lo >> 2));
3401 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3402 			me_hdr->ucode_start_addr_hi >> 2);
3403 
3404 		/*
3405 		 * Program CP_ME_CNTL to reset the given pipe so that
3406 		 * CP_ME_PRGRM_CNTR_START takes effect.
3407 		 */
3408 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3409 		if (pipe_id == 0)
3410 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3411 					ME_PIPE0_RESET, 1);
3412 		else
3413 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3414 					ME_PIPE1_RESET, 1);
3415 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3416 
3417 		/* Clear the me pipe reset bit. */
3418 		if (pipe_id == 0)
3419 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3420 					ME_PIPE0_RESET, 0);
3421 		else
3422 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3423 					ME_PIPE1_RESET, 0);
3424 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3425 
3426 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3427 			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3428 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3429 			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3430 	}
3431 	soc21_grbm_select(adev, 0, 0, 0, 0);
3432 	mutex_unlock(&adev->srbm_mutex);
3433 
3434 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3435 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3436 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3437 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3438 
3439 	/* Invalidate the data caches */
3440 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3441 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3442 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3443 
3444 	for (i = 0; i < usec_timeout; i++) {
3445 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3446 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3447 			INVALIDATE_DCACHE_COMPLETE))
3448 			break;
3449 		udelay(1);
3450 	}
3451 
3452 	if (i >= usec_timeout) {
3453 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3454 		return -EINVAL;
3455 	}
3456 
3457 	return 0;
3458 }
3459 
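/*
 * Load the gfx front-end microcode for the direct (non-PSP) load path:
 * halt the gfx CP, then load PFP and ME using either the RS64 or the
 * legacy loader depending on adev->gfx.rs64_enable.
 */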
3460 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3461 {
3462 	int r;
3463 
3464 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3465 		return -EINVAL;
3466 
3467 	gfx_v11_0_cp_gfx_enable(adev, false);
3468 
3469 	if (adev->gfx.rs64_enable)
3470 		r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3471 	else
3472 		r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3473 	if (r) {
3474 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3475 		return r;
3476 	}
3477 
3478 	if (adev->gfx.rs64_enable)
3479 		r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3480 	else
3481 		r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3482 	if (r) {
3483 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3484 		return r;
3485 	}
3486 
3487 	return 0;
3488 }
3489 
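/*
 * Start the gfx CP: program CP_MAX_CONTEXT and CP_DEVICE_ID, then
 * submit the clear-state preamble (the gfx11 CSB plus
 * PA_SC_TILE_STEERING_OVERRIDE) on gfx ring 0 and a bare CLEAR_STATE
 * on gfx ring 1 when a second ring is configured.
 */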
3490 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3491 {
3492 	struct amdgpu_ring *ring;
3493 	const struct cs_section_def *sect = NULL;
3494 	const struct cs_extent_def *ext = NULL;
3495 	int r, i;
3496 	int ctx_reg_offset;
3497 
3498 	/* init the CP */
3499 	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3500 		     adev->gfx.config.max_hw_contexts - 1);
3501 	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3502 
3503 	if (!amdgpu_async_gfx_ring)
3504 		gfx_v11_0_cp_gfx_enable(adev, true);
3505 
3506 	ring = &adev->gfx.gfx_ring[0];
3507 	r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3508 	if (r) {
3509 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3510 		return r;
3511 	}
3512 
3513 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3514 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3515 
3516 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3517 	amdgpu_ring_write(ring, 0x80000000);
3518 	amdgpu_ring_write(ring, 0x80000000);
3519 
3520 	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3521 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3522 			if (sect->id == SECT_CONTEXT) {
3523 				amdgpu_ring_write(ring,
3524 						  PACKET3(PACKET3_SET_CONTEXT_REG,
3525 							  ext->reg_count));
3526 				amdgpu_ring_write(ring, ext->reg_index -
3527 						  PACKET3_SET_CONTEXT_REG_START);
3528 				for (i = 0; i < ext->reg_count; i++)
3529 					amdgpu_ring_write(ring, ext->extent[i]);
3530 			}
3531 		}
3532 	}
3533 
3534 	ctx_reg_offset =
3535 		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3536 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3537 	amdgpu_ring_write(ring, ctx_reg_offset);
3538 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3539 
3540 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3541 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3542 
3543 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3544 	amdgpu_ring_write(ring, 0);
3545 
3546 	amdgpu_ring_commit(ring);
3547 
3548 	/* submit cs packet to copy state 0 to next available state */
3549 	if (adev->gfx.num_gfx_rings > 1) {
3550 		/* maximum supported gfx ring is 2 */
3551 		ring = &adev->gfx.gfx_ring[1];
3552 		r = amdgpu_ring_alloc(ring, 2);
3553 		if (r) {
3554 			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3555 			return r;
3556 		}
3557 
3558 		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3559 		amdgpu_ring_write(ring, 0);
3560 
3561 		amdgpu_ring_commit(ring);
3562 	}
3563 	return 0;
3564 }
3565 
3566 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3567 					 CP_PIPE_ID pipe)
3568 {
3569 	u32 tmp;
3570 
3571 	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3572 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3573 
3574 	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3575 }
3576 
3577 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3578 					  struct amdgpu_ring *ring)
3579 {
3580 	u32 tmp;
3581 
3582 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3583 	if (ring->use_doorbell) {
3584 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3585 				    DOORBELL_OFFSET, ring->doorbell_index);
3586 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3587 				    DOORBELL_EN, 1);
3588 	} else {
3589 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3590 				    DOORBELL_EN, 0);
3591 	}
3592 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3593 
3594 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3595 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
3596 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3597 
3598 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3599 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3600 }
3601 
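/*
 * Bring up the gfx ring buffers: for each gfx pipe program the ring
 * size, rptr/wptr write-back addresses, ring base and doorbell, then
 * kick the CP via gfx_v11_0_cp_gfx_start().
 */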
3602 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3603 {
3604 	struct amdgpu_ring *ring;
3605 	u32 tmp;
3606 	u32 rb_bufsz;
3607 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3608 
3609 	/* Set the write pointer delay */
3610 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3611 
3612 	/* set the RB to use vmid 0 */
3613 	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3614 
3615 	/* Init gfx ring 0 for pipe 0 */
3616 	mutex_lock(&adev->srbm_mutex);
3617 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3618 
3619 	/* Set ring buffer size */
3620 	ring = &adev->gfx.gfx_ring[0];
3621 	rb_bufsz = order_base_2(ring->ring_size / 8);
3622 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3623 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3624 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3625 
3626 	/* Initialize the ring buffer's write pointers */
3627 	ring->wptr = 0;
3628 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3629 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3630 
3631 	/* set the wb address whether it's enabled or not */
3632 	rptr_addr = ring->rptr_gpu_addr;
3633 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3634 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3635 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3636 
3637 	wptr_gpu_addr = ring->wptr_gpu_addr;
3638 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3639 		     lower_32_bits(wptr_gpu_addr));
3640 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3641 		     upper_32_bits(wptr_gpu_addr));
3642 
3643 	mdelay(1);
3644 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3645 
3646 	rb_addr = ring->gpu_addr >> 8;
3647 	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3648 	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3649 
3650 	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3651 
3652 	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3653 	mutex_unlock(&adev->srbm_mutex);
3654 
3655 	/* Init gfx ring 1 for pipe 1 */
3656 	if (adev->gfx.num_gfx_rings > 1) {
3657 		mutex_lock(&adev->srbm_mutex);
3658 		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3659 		/* maximum supported gfx ring is 2 */
3660 		ring = &adev->gfx.gfx_ring[1];
3661 		rb_bufsz = order_base_2(ring->ring_size / 8);
3662 		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3663 		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3664 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3665 		/* Initialize the ring buffer's write pointers */
3666 		ring->wptr = 0;
3667 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3668 		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3669 		/* Set the wb address whether it's enabled or not */
3670 		rptr_addr = ring->rptr_gpu_addr;
3671 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3672 		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3673 			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3674 		wptr_gpu_addr = ring->wptr_gpu_addr;
3675 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3676 			     lower_32_bits(wptr_gpu_addr));
3677 		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3678 			     upper_32_bits(wptr_gpu_addr));
3679 
3680 		mdelay(1);
3681 		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3682 
3683 		rb_addr = ring->gpu_addr >> 8;
3684 		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3685 		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3686 		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3687 
3688 		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3689 		mutex_unlock(&adev->srbm_mutex);
3690 	}
3691 	/* Switch to pipe 0 */
3692 	mutex_lock(&adev->srbm_mutex);
3693 	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3694 	mutex_unlock(&adev->srbm_mutex);
3695 
3696 	/* start the ring */
3697 	gfx_v11_0_cp_gfx_start(adev);
3698 
3699 	return 0;
3700 }
3701 
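/*
 * Enable or halt the compute micro engine.  The RS64 path also
 * deasserts (or asserts) the per-pipe reset and active bits in
 * CP_MEC_RS64_CNTL; the legacy path only toggles the MEC halt bits,
 * leaving ME2 halted when the MES KIQ is in use.
 */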
3702 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3703 {
3704 	u32 data;
3705 
3706 	if (adev->gfx.rs64_enable) {
3707 		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3708 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3709 							 enable ? 0 : 1);
3710 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3711 							 enable ? 0 : 1);
3712 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3713 							 enable ? 0 : 1);
3714 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3715 							 enable ? 0 : 1);
3716 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3717 							 enable ? 0 : 1);
3718 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3719 							 enable ? 1 : 0);
3720 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3721 				                         enable ? 1 : 0);
3722 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3723 							 enable ? 1 : 0);
3724 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3725 							 enable ? 1 : 0);
3726 		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3727 							 enable ? 0 : 1);
3728 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3729 	} else {
3730 		data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3731 
3732 		if (enable) {
3733 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3734 			if (!adev->enable_mes_kiq)
3735 				data = REG_SET_FIELD(data, CP_MEC_CNTL,
3736 						     MEC_ME2_HALT, 0);
3737 		} else {
3738 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3739 			data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3740 		}
3741 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3742 	}
3743 
3744 	udelay(50);
3745 }
3746 
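/*
 * Legacy MEC microcode load: copy the image into a GTT buffer, point
 * the MEC cache at it and write the MEC1 jump table.
 */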
3747 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3748 {
3749 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3750 	const __le32 *fw_data;
3751 	unsigned i, fw_size;
3752 	u32 *fw = NULL;
3753 	int r;
3754 
3755 	if (!adev->gfx.mec_fw)
3756 		return -EINVAL;
3757 
3758 	gfx_v11_0_cp_compute_enable(adev, false);
3759 
3760 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3761 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3762 
3763 	fw_data = (const __le32 *)
3764 		(adev->gfx.mec_fw->data +
3765 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3766 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3767 
3768 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3769 					  PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3770 					  &adev->gfx.mec.mec_fw_obj,
3771 					  &adev->gfx.mec.mec_fw_gpu_addr,
3772 					  (void **)&fw);
3773 	if (r) {
3774 		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3775 		gfx_v11_0_mec_fini(adev);
3776 		return r;
3777 	}
3778 
3779 	memcpy(fw, fw_data, fw_size);
3780 
3781 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3782 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3783 
3784 	gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3785 
3786 	/* MEC1 */
3787 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3788 
3789 	for (i = 0; i < mec_hdr->jt_size; i++)
3790 		WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3791 			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3792 
3793 	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3794 
3795 	return 0;
3796 }
3797 
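/*
 * RS64 MEC microcode load: instruction and data segments go into
 * separate 64KB-aligned buffers, the CPC instruction cache and MEC
 * data cache are pointed at them for every MEC pipe, and both caches
 * are then invalidated.
 */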
3798 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3799 {
3800 	const struct gfx_firmware_header_v2_0 *mec_hdr;
3801 	const __le32 *fw_ucode, *fw_data;
3802 	u32 tmp, fw_ucode_size, fw_data_size;
3803 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3804 	u32 *fw_ucode_ptr, *fw_data_ptr;
3805 	int r;
3806 
3807 	if (!adev->gfx.mec_fw)
3808 		return -EINVAL;
3809 
3810 	gfx_v11_0_cp_compute_enable(adev, false);
3811 
3812 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3813 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3814 
3815 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3816 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
3817 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3818 
3819 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3820 				le32_to_cpu(mec_hdr->data_offset_bytes));
3821 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3822 
3823 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3824 				      64 * 1024,
3825 				      AMDGPU_GEM_DOMAIN_VRAM |
3826 				      AMDGPU_GEM_DOMAIN_GTT,
3827 				      &adev->gfx.mec.mec_fw_obj,
3828 				      &adev->gfx.mec.mec_fw_gpu_addr,
3829 				      (void **)&fw_ucode_ptr);
3830 	if (r) {
3831 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3832 		gfx_v11_0_mec_fini(adev);
3833 		return r;
3834 	}
3835 
3836 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3837 				      64 * 1024,
3838 				      AMDGPU_GEM_DOMAIN_VRAM |
3839 				      AMDGPU_GEM_DOMAIN_GTT,
3840 				      &adev->gfx.mec.mec_fw_data_obj,
3841 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
3842 				      (void **)&fw_data_ptr);
3843 	if (r) {
3844 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3845 		gfx_v11_0_mec_fini(adev);
3846 		return r;
3847 	}
3848 
3849 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3850 	memcpy(fw_data_ptr, fw_data, fw_data_size);
3851 
3852 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3853 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3854 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3855 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3856 
3857 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3858 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3859 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3860 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3861 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3862 
3863 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3864 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3865 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3866 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3867 
3868 	mutex_lock(&adev->srbm_mutex);
3869 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3870 		soc21_grbm_select(adev, 1, i, 0, 0);
3871 
3872 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3873 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3874 		     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3875 
3876 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3877 					mec_hdr->ucode_start_addr_lo >> 2 |
3878 					mec_hdr->ucode_start_addr_hi << 30);
3879 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3880 					mec_hdr->ucode_start_addr_hi >> 2);
3881 
3882 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3883 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3884 		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3885 	}
3886 	soc21_grbm_select(adev, 0, 0, 0, 0);
3887 	mutex_unlock(&adev->srbm_mutex);
3888 
3889 	/* Invalidate the MEC data cache */
3890 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3891 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3892 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3893 
3894 	/* Wait for invalidation complete */
3895 	for (i = 0; i < usec_timeout; i++) {
3896 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3897 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3898 				       INVALIDATE_DCACHE_COMPLETE))
3899 			break;
3900 		udelay(1);
3901 	}
3902 
3903 	if (i >= usec_timeout) {
3904 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3905 		return -EINVAL;
3906 	}
3907 
3908 	/* Trigger an invalidation of the L1 instruction caches */
3909 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3910 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3911 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3912 
3913 	/* Wait for invalidation complete */
3914 	for (i = 0; i < usec_timeout; i++) {
3915 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3916 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3917 				       INVALIDATE_CACHE_COMPLETE))
3918 			break;
3919 		udelay(1);
3920 	}
3921 
3922 	if (i >= usec_timeout) {
3923 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
3924 		return -EINVAL;
3925 	}
3926 
3927 	return 0;
3928 }
3929 
3930 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3931 {
3932 	uint32_t tmp;
3933 	struct amdgpu_device *adev = ring->adev;
3934 
3935 	/* tell RLC which queue is the KIQ */
3936 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
3937 	tmp &= 0xffffff00;
3938 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3939 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
3940 }
3941 
3942 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3943 {
3944 	/* set graphics engine doorbell range */
3945 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3946 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
3947 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3948 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3949 
3950 	/* set compute engine doorbell range */
3951 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3952 		     (adev->doorbell_index.kiq * 2) << 2);
3953 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3954 		     (adev->doorbell_index.userqueue_end * 2) << 2);
3955 }
3956 
3957 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
3958 					   struct v11_gfx_mqd *mqd,
3959 					   struct amdgpu_mqd_prop *prop)
3960 {
3961 	bool priority = false;
3962 	u32 tmp;
3963 
3964 	/* set up default queue priority level
3965 	 * 0x0 = low priority, 0x1 = high priority
3966 	 */
3967 	if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
3968 		priority = true;
3969 
3970 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3971 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
3972 	mqd->cp_gfx_hqd_queue_priority = tmp;
3973 }
3974 
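/*
 * Fill a gfx memory queue descriptor (MQD) with the HQD state (ring
 * base, rptr/wptr addresses, doorbell and priority) that will be
 * loaded when the queue is initialized.
 */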
3975 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3976 				  struct amdgpu_mqd_prop *prop)
3977 {
3978 	struct v11_gfx_mqd *mqd = m;
3979 	uint64_t hqd_gpu_addr, wb_gpu_addr;
3980 	uint32_t tmp;
3981 	uint32_t rb_bufsz;
3982 
3983 	/* set up gfx hqd wptr */
3984 	mqd->cp_gfx_hqd_wptr = 0;
3985 	mqd->cp_gfx_hqd_wptr_hi = 0;
3986 
3987 	/* set the pointer to the MQD */
3988 	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3989 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3990 
3991 	/* set up mqd control */
3992 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3993 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3994 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3995 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3996 	mqd->cp_gfx_mqd_control = tmp;
3997 
3998 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3999 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
4000 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
4001 	mqd->cp_gfx_hqd_vmid = 0;
4002 
4003 	/* set up gfx queue priority */
4004 	gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
4005 
4006 	/* set up time quantum */
4007 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
4008 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
4009 	mqd->cp_gfx_hqd_quantum = tmp;
4010 
4011 	/* set up gfx hqd base. this is similar to CP_RB_BASE */
4012 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4013 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
4014 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
4015 
4016 	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
4017 	wb_gpu_addr = prop->rptr_gpu_addr;
4018 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
4019 	mqd->cp_gfx_hqd_rptr_addr_hi =
4020 		upper_32_bits(wb_gpu_addr) & 0xffff;
4021 
4022 	/* set up rb_wptr_poll addr */
4023 	wb_gpu_addr = prop->wptr_gpu_addr;
4024 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4025 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4026 
4027 	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
4028 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
4029 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
4030 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
4031 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
4032 #ifdef __BIG_ENDIAN
4033 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
4034 #endif
4035 	mqd->cp_gfx_hqd_cntl = tmp;
4036 
4037 	/* set up cp_doorbell_control */
4038 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
4039 	if (prop->use_doorbell) {
4040 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4041 				    DOORBELL_OFFSET, prop->doorbell_index);
4042 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4043 				    DOORBELL_EN, 1);
4044 	} else
4045 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4046 				    DOORBELL_EN, 0);
4047 	mqd->cp_rb_doorbell_control = tmp;
4048 
4049 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4050 	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
4051 
4052 	/* activate the queue */
4053 	mqd->cp_gfx_hqd_active = 1;
4054 
4055 	return 0;
4056 }
4057 
4058 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
4059 {
4060 	struct amdgpu_device *adev = ring->adev;
4061 	struct v11_gfx_mqd *mqd = ring->mqd_ptr;
4062 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
4063 
4064 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4065 		memset((void *)mqd, 0, sizeof(*mqd));
4066 		mutex_lock(&adev->srbm_mutex);
4067 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4068 		amdgpu_ring_init_mqd(ring);
4069 		soc21_grbm_select(adev, 0, 0, 0, 0);
4070 		mutex_unlock(&adev->srbm_mutex);
4071 		if (adev->gfx.me.mqd_backup[mqd_idx])
4072 			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4073 	} else {
4074 		/* restore mqd with the backup copy */
4075 		if (adev->gfx.me.mqd_backup[mqd_idx])
4076 			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
4077 		/* reset the ring */
4078 		ring->wptr = 0;
4079 		*ring->wptr_cpu_addr = 0;
4080 		amdgpu_ring_clear_ring(ring);
4081 	}
4082 
4083 	return 0;
4084 }
4085 
4086 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
4087 {
4088 	int r, i;
4089 	struct amdgpu_ring *ring;
4090 
4091 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4092 		ring = &adev->gfx.gfx_ring[i];
4093 
4094 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4095 		if (unlikely(r != 0))
4096 			return r;
4097 
4098 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4099 		if (!r) {
4100 			r = gfx_v11_0_kgq_init_queue(ring, false);
4101 			amdgpu_bo_kunmap(ring->mqd_obj);
4102 			ring->mqd_ptr = NULL;
4103 		}
4104 		amdgpu_bo_unreserve(ring->mqd_obj);
4105 		if (r)
4106 			return r;
4107 	}
4108 
4109 	r = amdgpu_gfx_enable_kgq(adev, 0);
4110 	if (r)
4111 		return r;
4112 
4113 	return gfx_v11_0_cp_gfx_start(adev);
4114 }
4115 
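/*
 * Fill a compute MQD: EOP buffer, PQ base and size, rptr report and
 * wptr poll addresses, doorbell control and the static queue/pipe
 * priorities.
 */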
4116 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
4117 				      struct amdgpu_mqd_prop *prop)
4118 {
4119 	struct v11_compute_mqd *mqd = m;
4120 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4121 	uint32_t tmp;
4122 
4123 	mqd->header = 0xC0310800;
4124 	mqd->compute_pipelinestat_enable = 0x00000001;
4125 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4126 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4127 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4128 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4129 	mqd->compute_misc_reserved = 0x00000007;
4130 
4131 	eop_base_addr = prop->eop_gpu_addr >> 8;
4132 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4133 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4134 
4135 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4136 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
4137 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4138 			(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
4139 
4140 	mqd->cp_hqd_eop_control = tmp;
4141 
4142 	/* enable doorbell? */
4143 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
4144 
4145 	if (prop->use_doorbell) {
4146 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4147 				    DOORBELL_OFFSET, prop->doorbell_index);
4148 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4149 				    DOORBELL_EN, 1);
4150 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4151 				    DOORBELL_SOURCE, 0);
4152 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4153 				    DOORBELL_HIT, 0);
4154 	} else {
4155 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4156 				    DOORBELL_EN, 0);
4157 	}
4158 
4159 	mqd->cp_hqd_pq_doorbell_control = tmp;
4160 
4161 	/* disable the queue if it's active */
4162 	mqd->cp_hqd_dequeue_request = 0;
4163 	mqd->cp_hqd_pq_rptr = 0;
4164 	mqd->cp_hqd_pq_wptr_lo = 0;
4165 	mqd->cp_hqd_pq_wptr_hi = 0;
4166 
4167 	/* set the pointer to the MQD */
4168 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
4169 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4170 
4171 	/* set MQD vmid to 0 */
4172 	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
4173 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4174 	mqd->cp_mqd_control = tmp;
4175 
4176 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4177 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4178 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4179 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4180 
4181 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4182 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
4183 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4184 			    (order_base_2(prop->queue_size / 4) - 1));
4185 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4186 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4187 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
4188 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
4189 			    prop->allow_tunneling);
4190 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4191 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4192 	mqd->cp_hqd_pq_control = tmp;
4193 
4194 	/* set the wb address whether it's enabled or not */
4195 	wb_gpu_addr = prop->rptr_gpu_addr;
4196 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4197 	mqd->cp_hqd_pq_rptr_report_addr_hi =
4198 		upper_32_bits(wb_gpu_addr) & 0xffff;
4199 
4200 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4201 	wb_gpu_addr = prop->wptr_gpu_addr;
4202 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4203 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4204 
4205 	tmp = 0;
4206 	/* enable the doorbell if requested */
4207 	if (prop->use_doorbell) {
4208 		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
4209 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4210 				DOORBELL_OFFSET, prop->doorbell_index);
4211 
4212 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4213 				    DOORBELL_EN, 1);
4214 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4215 				    DOORBELL_SOURCE, 0);
4216 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4217 				    DOORBELL_HIT, 0);
4218 	}
4219 
4220 	mqd->cp_hqd_pq_doorbell_control = tmp;
4221 
4222 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4223 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
4224 
4225 	/* set the vmid for the queue */
4226 	mqd->cp_hqd_vmid = 0;
4227 
4228 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
4229 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
4230 	mqd->cp_hqd_persistent_state = tmp;
4231 
4232 	/* set MIN_IB_AVAIL_SIZE */
4233 	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
4234 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4235 	mqd->cp_hqd_ib_control = tmp;
4236 
4237 	/* set static priority for a compute queue/ring */
4238 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
4239 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
4240 
4241 	mqd->cp_hqd_active = prop->hqd_active;
4242 
4243 	return 0;
4244 }
4245 
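/*
 * Program the KIQ HQD registers directly from the MQD.  Unlike the
 * user compute queues, which are mapped via the KIQ/MES, the KIQ
 * itself has to be brought up through MMIO writes by the driver.
 */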
4246 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
4247 {
4248 	struct amdgpu_device *adev = ring->adev;
4249 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4250 	int j;
4251 
4252 	/* inactivate the queue */
4253 	if (amdgpu_sriov_vf(adev))
4254 		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
4255 
4256 	/* disable wptr polling */
4257 	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4258 
4259 	/* write the EOP addr */
4260 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
4261 	       mqd->cp_hqd_eop_base_addr_lo);
4262 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
4263 	       mqd->cp_hqd_eop_base_addr_hi);
4264 
4265 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4266 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
4267 	       mqd->cp_hqd_eop_control);
4268 
4269 	/* enable doorbell? */
4270 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4271 	       mqd->cp_hqd_pq_doorbell_control);
4272 
4273 	/* disable the queue if it's active */
4274 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
4275 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
4276 		for (j = 0; j < adev->usec_timeout; j++) {
4277 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
4278 				break;
4279 			udelay(1);
4280 		}
4281 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
4282 		       mqd->cp_hqd_dequeue_request);
4283 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
4284 		       mqd->cp_hqd_pq_rptr);
4285 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4286 		       mqd->cp_hqd_pq_wptr_lo);
4287 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4288 		       mqd->cp_hqd_pq_wptr_hi);
4289 	}
4290 
4291 	/* set the pointer to the MQD */
4292 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
4293 	       mqd->cp_mqd_base_addr_lo);
4294 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
4295 	       mqd->cp_mqd_base_addr_hi);
4296 
4297 	/* set MQD vmid to 0 */
4298 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
4299 	       mqd->cp_mqd_control);
4300 
4301 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4302 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
4303 	       mqd->cp_hqd_pq_base_lo);
4304 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
4305 	       mqd->cp_hqd_pq_base_hi);
4306 
4307 	/* set up the HQD, this is similar to CP_RB0_CNTL */
4308 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
4309 	       mqd->cp_hqd_pq_control);
4310 
4311 	/* set the wb address whether it's enabled or not */
4312 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
4313 		mqd->cp_hqd_pq_rptr_report_addr_lo);
4314 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4315 		mqd->cp_hqd_pq_rptr_report_addr_hi);
4316 
4317 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4318 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
4319 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
4320 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
4321 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
4322 
4323 	/* enable the doorbell if requested */
4324 	if (ring->use_doorbell) {
4325 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4326 			(adev->doorbell_index.kiq * 2) << 2);
4327 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4328 			(adev->doorbell_index.userqueue_end * 2) << 2);
4329 	}
4330 
4331 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4332 	       mqd->cp_hqd_pq_doorbell_control);
4333 
4334 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4335 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4336 	       mqd->cp_hqd_pq_wptr_lo);
4337 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4338 	       mqd->cp_hqd_pq_wptr_hi);
4339 
4340 	/* set the vmid for the queue */
4341 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
4342 
4343 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
4344 	       mqd->cp_hqd_persistent_state);
4345 
4346 	/* activate the queue */
4347 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4348 	       mqd->cp_hqd_active);
4349 
4350 	if (ring->use_doorbell)
4351 		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4352 
4353 	return 0;
4354 }
4355 
4356 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4357 {
4358 	struct amdgpu_device *adev = ring->adev;
4359 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4360 
4361 	gfx_v11_0_kiq_setting(ring);
4362 
4363 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4364 		/* reset MQD to a clean status */
4365 		if (adev->gfx.kiq[0].mqd_backup)
4366 			memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4367 
4368 		/* reset ring buffer */
4369 		ring->wptr = 0;
4370 		amdgpu_ring_clear_ring(ring);
4371 
4372 		mutex_lock(&adev->srbm_mutex);
4373 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4374 		gfx_v11_0_kiq_init_register(ring);
4375 		soc21_grbm_select(adev, 0, 0, 0, 0);
4376 		mutex_unlock(&adev->srbm_mutex);
4377 	} else {
4378 		memset((void *)mqd, 0, sizeof(*mqd));
4379 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4380 			amdgpu_ring_clear_ring(ring);
4381 		mutex_lock(&adev->srbm_mutex);
4382 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4383 		amdgpu_ring_init_mqd(ring);
4384 		gfx_v11_0_kiq_init_register(ring);
4385 		soc21_grbm_select(adev, 0, 0, 0, 0);
4386 		mutex_unlock(&adev->srbm_mutex);
4387 
4388 		if (adev->gfx.kiq[0].mqd_backup)
4389 			memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4390 	}
4391 
4392 	return 0;
4393 }
4394 
4395 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
4396 {
4397 	struct amdgpu_device *adev = ring->adev;
4398 	struct v11_compute_mqd *mqd = ring->mqd_ptr;
4399 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
4400 
4401 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4402 		memset((void *)mqd, 0, sizeof(*mqd));
4403 		mutex_lock(&adev->srbm_mutex);
4404 		soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4405 		amdgpu_ring_init_mqd(ring);
4406 		soc21_grbm_select(adev, 0, 0, 0, 0);
4407 		mutex_unlock(&adev->srbm_mutex);
4408 
4409 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4410 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4411 	} else {
4412 		/* restore MQD to a clean status */
4413 		if (adev->gfx.mec.mqd_backup[mqd_idx])
4414 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4415 		/* reset ring buffer */
4416 		ring->wptr = 0;
4417 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4418 		amdgpu_ring_clear_ring(ring);
4419 	}
4420 
4421 	return 0;
4422 }
4423 
4424 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4425 {
4426 	struct amdgpu_ring *ring;
4427 	int r;
4428 
4429 	ring = &adev->gfx.kiq[0].ring;
4430 
4431 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
4432 	if (unlikely(r != 0))
4433 		return r;
4434 
4435 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4436 	if (unlikely(r != 0)) {
4437 		amdgpu_bo_unreserve(ring->mqd_obj);
4438 		return r;
4439 	}
4440 
4441 	gfx_v11_0_kiq_init_queue(ring);
4442 	amdgpu_bo_kunmap(ring->mqd_obj);
4443 	ring->mqd_ptr = NULL;
4444 	amdgpu_bo_unreserve(ring->mqd_obj);
4445 	ring->sched.ready = true;
4446 	return 0;
4447 }
4448 
4449 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4450 {
4451 	struct amdgpu_ring *ring = NULL;
4452 	int r = 0, i;
4453 
4454 	if (!amdgpu_async_gfx_ring)
4455 		gfx_v11_0_cp_compute_enable(adev, true);
4456 
4457 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4458 		ring = &adev->gfx.compute_ring[i];
4459 
4460 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
4461 		if (unlikely(r != 0))
4462 			goto done;
4463 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4464 		if (!r) {
4465 			r = gfx_v11_0_kcq_init_queue(ring, false);
4466 			amdgpu_bo_kunmap(ring->mqd_obj);
4467 			ring->mqd_ptr = NULL;
4468 		}
4469 		amdgpu_bo_unreserve(ring->mqd_obj);
4470 		if (r)
4471 			goto done;
4472 	}
4473 
4474 	r = amdgpu_gfx_enable_kcq(adev, 0);
4475 done:
4476 	return r;
4477 }
4478 
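/*
 * Full CP bring-up: load the CP microcode by MMIO for the direct load
 * type, set the doorbell ranges, resume the KIQ (or MES KIQ), the
 * compute queues and the gfx rings, then ring-test everything.
 */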
4479 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4480 {
4481 	int r, i;
4482 	struct amdgpu_ring *ring;
4483 
4484 	if (!(adev->flags & AMD_IS_APU))
4485 		gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4486 
4487 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4488 		/* legacy firmware loading */
4489 		r = gfx_v11_0_cp_gfx_load_microcode(adev);
4490 		if (r)
4491 			return r;
4492 
4493 		if (adev->gfx.rs64_enable)
4494 			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4495 		else
4496 			r = gfx_v11_0_cp_compute_load_microcode(adev);
4497 		if (r)
4498 			return r;
4499 	}
4500 
4501 	gfx_v11_0_cp_set_doorbell_range(adev);
4502 
4503 	if (amdgpu_async_gfx_ring) {
4504 		gfx_v11_0_cp_compute_enable(adev, true);
4505 		gfx_v11_0_cp_gfx_enable(adev, true);
4506 	}
4507 
4508 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4509 		r = amdgpu_mes_kiq_hw_init(adev);
4510 	else
4511 		r = gfx_v11_0_kiq_resume(adev);
4512 	if (r)
4513 		return r;
4514 
4515 	r = gfx_v11_0_kcq_resume(adev);
4516 	if (r)
4517 		return r;
4518 
4519 	if (!amdgpu_async_gfx_ring) {
4520 		r = gfx_v11_0_cp_gfx_resume(adev);
4521 		if (r)
4522 			return r;
4523 	} else {
4524 		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4525 		if (r)
4526 			return r;
4527 	}
4528 
4529 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4530 		ring = &adev->gfx.gfx_ring[i];
4531 		r = amdgpu_ring_test_helper(ring);
4532 		if (r)
4533 			return r;
4534 	}
4535 
4536 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4537 		ring = &adev->gfx.compute_ring[i];
4538 		r = amdgpu_ring_test_helper(ring);
4539 		if (r)
4540 			return r;
4541 	}
4542 
4543 	return 0;
4544 }
4545 
4546 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4547 {
4548 	gfx_v11_0_cp_gfx_enable(adev, enable);
4549 	gfx_v11_0_cp_compute_enable(adev, enable);
4550 }
4551 
4552 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4553 {
4554 	int r;
4555 	bool value;
4556 
4557 	r = adev->gfxhub.funcs->gart_enable(adev);
4558 	if (r)
4559 		return r;
4560 
4561 	adev->hdp.funcs->flush_hdp(adev, NULL);
4562 
4563 	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
4564 		false : true;
4565 
4566 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4567 	/* TODO: investigate why this and the hdp flush above are needed;
4568 	 * are we missing a flush somewhere else? */
4569 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4570 
4571 	return 0;
4572 }
4573 
4574 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4575 {
4576 	u32 tmp;
4577 
4578 	/* select RS64 */
4579 	if (adev->gfx.rs64_enable) {
4580 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4581 		tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4582 		WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4583 
4584 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4585 		tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4586 		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4587 	}
4588 
4589 	if (amdgpu_emu_mode == 1)
4590 		msleep(100);
4591 }
4592 
4593 static int get_gb_addr_config(struct amdgpu_device *adev)
4594 {
4595 	u32 gb_addr_config;
4596 
4597 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4598 	if (gb_addr_config == 0)
4599 		return -EINVAL;
4600 
4601 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
4602 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4603 
4604 	adev->gfx.config.gb_addr_config = gb_addr_config;
4605 
4606 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4607 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4608 				      GB_ADDR_CONFIG, NUM_PIPES);
4609 
4610 	adev->gfx.config.max_tile_pipes =
4611 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4612 
4613 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4614 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4615 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4616 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4617 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4618 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
4619 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4620 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4621 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4622 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4623 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4624 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4625 
4626 	return 0;
4627 }
4628 
4629 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4630 {
4631 	uint32_t data;
4632 
4633 	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4634 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4635 	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4636 
4637 	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4638 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4639 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4640 }
4641 
4642 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
4643 {
4644 	int r;
4645 	struct amdgpu_device *adev = ip_block->adev;
4646 
4647 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4648 				       adev->gfx.cleaner_shader_ptr);
4649 
4650 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4651 		if (adev->gfx.imu.funcs) {
4652 			/* RLC autoload sequence 1: Program rlc ram */
4653 			if (adev->gfx.imu.funcs->program_rlc_ram)
4654 				adev->gfx.imu.funcs->program_rlc_ram(adev);
4655 			/* rlc autoload firmware */
4656 			r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4657 			if (r)
4658 				return r;
4659 		}
4660 	} else {
4661 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4662 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4663 				if (adev->gfx.imu.funcs->load_microcode)
4664 					adev->gfx.imu.funcs->load_microcode(adev);
4665 				if (adev->gfx.imu.funcs->setup_imu)
4666 					adev->gfx.imu.funcs->setup_imu(adev);
4667 				if (adev->gfx.imu.funcs->start_imu)
4668 					adev->gfx.imu.funcs->start_imu(adev);
4669 			}
4670 
4671 			/* disable gpa mode in backdoor loading */
4672 			gfx_v11_0_disable_gpa_mode(adev);
4673 		}
4674 	}
4675 
4676 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4677 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4678 		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4679 		if (r) {
4680 			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
4681 			return r;
4682 		}
4683 	}
4684 
4685 	adev->gfx.is_poweron = true;
4686 
4687 	if (get_gb_addr_config(adev))
4688 		DRM_WARN("Invalid gb_addr_config !\n");
4689 
4690 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4691 	    adev->gfx.rs64_enable)
4692 		gfx_v11_0_config_gfx_rs64(adev);
4693 
4694 	r = gfx_v11_0_gfxhub_enable(adev);
4695 	if (r)
4696 		return r;
4697 
4698 	if (!amdgpu_emu_mode)
4699 		gfx_v11_0_init_golden_registers(adev);
4700 
4701 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4702 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4703 		/*
4704 		 * For gfx 11, RLC firmware loading relies on the SMU firmware
4705 		 * being loaded first, so for the direct load type the SMC
4706 		 * ucode has to be loaded here before the RLC.
4707 		 */
4708 		r = amdgpu_pm_load_smu_firmware(adev, NULL);
4709 		if (r)
4710 			return r;
4711 	}
4712 
4713 	gfx_v11_0_constants_init(adev);
4714 
4715 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4716 		gfx_v11_0_select_cp_fw_arch(adev);
4717 
4718 	if (adev->nbio.funcs->gc_doorbell_init)
4719 		adev->nbio.funcs->gc_doorbell_init(adev);
4720 
4721 	r = gfx_v11_0_rlc_resume(adev);
4722 	if (r)
4723 		return r;
4724 
4725 	/*
4726 	 * init golden registers and rlc resume may override some registers,
4727 	 * reconfig them here
4728 	 */
4729 	gfx_v11_0_tcp_harvest(adev);
4730 
4731 	r = gfx_v11_0_cp_resume(adev);
4732 	if (r)
4733 		return r;
4734 
4735 	/* get IMU version from HW if it's not set */
4736 	if (!adev->gfx.imu_fw_version)
4737 		adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);
4738 
4739 	return r;
4740 }
4741 
4742 static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
4743 {
4744 	struct amdgpu_device *adev = ip_block->adev;
4745 
4746 	cancel_delayed_work_sync(&adev->gfx.idle_work);
4747 
4748 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4749 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4750 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4751 
4752 	if (!adev->no_hw_access) {
4753 		if (amdgpu_async_gfx_ring) {
4754 			if (amdgpu_gfx_disable_kgq(adev, 0))
4755 				DRM_ERROR("KGQ disable failed\n");
4756 		}
4757 
4758 		if (amdgpu_gfx_disable_kcq(adev, 0))
4759 			DRM_ERROR("KCQ disable failed\n");
4760 
4761 		amdgpu_mes_kiq_hw_fini(adev);
4762 	}
4763 
4764 	if (amdgpu_sriov_vf(adev))
4765 		/* The steps that disable CPG and clear the KIQ position are
4766 		 * removed here so that the CP can perform IDLE-SAVE during the
4767 		 * switch. Those steps are needed to avoid a DMAR error on
4768 		 * gfx9, but that error has not been reproduced on gfx11.
4769 		 */
4770 		return 0;
4771 
4772 	gfx_v11_0_cp_enable(adev, false);
4773 	gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4774 
4775 	adev->gfxhub.funcs->gart_disable(adev);
4776 
4777 	adev->gfx.is_poweron = false;
4778 
4779 	return 0;
4780 }
4781 
4782 static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
4783 {
4784 	return gfx_v11_0_hw_fini(ip_block);
4785 }
4786 
4787 static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
4788 {
4789 	return gfx_v11_0_hw_init(ip_block);
4790 }
4791 
4792 static bool gfx_v11_0_is_idle(void *handle)
4793 {
4794 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4795 
4796 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
4797 				GRBM_STATUS, GUI_ACTIVE))
4798 		return false;
4799 	else
4800 		return true;
4801 }
4802 
4803 static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4804 {
4805 	unsigned i;
4806 	u32 tmp;
4807 	struct amdgpu_device *adev = ip_block->adev;
4808 
4809 	for (i = 0; i < adev->usec_timeout; i++) {
4810 		/* read GRBM_STATUS */
4811 		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4812 			GRBM_STATUS__GUI_ACTIVE_MASK;
4813 
4814 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
4815 			return 0;
4816 		udelay(1);
4817 	}
4818 	return -ETIMEDOUT;
4819 }
4820 
4821 int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
4822 				      bool req)
4823 {
4824 	u32 i, tmp, val;
4825 
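	/* Poll until the request takes effect: when acquiring, success is the
	 * readback matching what was written (this client owns the mutex);
	 * when releasing, success is the readback no longer showing a pending
	 * request from this client (the mutex is free or held by firmware).
	 */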
4826 	for (i = 0; i < adev->usec_timeout; i++) {
4827 		/* Request with MeId=2, PipeId=0 */
4828 		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
4829 		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
4830 		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
4831 
4832 		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
4833 		if (req) {
4834 			if (val == tmp)
4835 				break;
4836 		} else {
4837 			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
4838 					    REQUEST, 1);
4839 
4840 			/* unlocked or locked by firmware */
4841 			if (val != tmp)
4842 				break;
4843 		}
4844 		udelay(1);
4845 	}
4846 
4847 	if (i >= adev->usec_timeout)
4848 		return -EINVAL;
4849 
4850 	return 0;
4851 }
4852 
4853 static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
4854 {
4855 	u32 grbm_soft_reset = 0;
4856 	u32 tmp;
4857 	int r, i, j, k;
4858 	struct amdgpu_device *adev = ip_block->adev;
4859 
4860 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4861 
4862 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4863 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4864 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4865 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4866 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4867 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4868 
4869 	mutex_lock(&adev->srbm_mutex);
4870 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4871 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4872 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4873 				soc21_grbm_select(adev, i, k, j, 0);
4874 
4875 				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4876 				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4877 			}
4878 		}
4879 	}
4880 	for (i = 0; i < adev->gfx.me.num_me; ++i) {
4881 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4882 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4883 				soc21_grbm_select(adev, i, k, j, 0);
4884 
4885 				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4886 			}
4887 		}
4888 	}
4889 	soc21_grbm_select(adev, 0, 0, 0, 0);
4890 	mutex_unlock(&adev->srbm_mutex);
4891 
4892 	/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
4893 	mutex_lock(&adev->gfx.reset_sem_mutex);
4894 	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
4895 	if (r) {
4896 		mutex_unlock(&adev->gfx.reset_sem_mutex);
4897 		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
4898 		return r;
4899 	}
4900 
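	/* Request a CP reset of all VMIDs except VMID 0 (bit 0 is left clear). */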
4901 	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
4902 
4903 	/* Read the CP_VMID_RESET register three times to give
4904 	 * GFX_HQD_ACTIVE sufficient time to reach 0. */
4905 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4906 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4907 	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4908 
4909 	/* release the gfx mutex */
4910 	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
4911 	mutex_unlock(&adev->gfx.reset_sem_mutex);
4912 	if (r) {
4913 		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
4914 		return r;
4915 	}
4916 
4917 	for (i = 0; i < adev->usec_timeout; i++) {
4918 		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
4919 		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
4920 			break;
4921 		udelay(1);
4922 	}
4923 	if (i >= adev->usec_timeout) {
4924 		DRM_ERROR("Failed to wait for all pipes to become idle\n");
4925 		return -EINVAL;
4926 	}
4927 
4928 	/**********  trigger soft reset  ***********/
4929 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4930 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4931 					SOFT_RESET_CP, 1);
4932 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4933 					SOFT_RESET_GFX, 1);
4934 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4935 					SOFT_RESET_CPF, 1);
4936 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4937 					SOFT_RESET_CPC, 1);
4938 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4939 					SOFT_RESET_CPG, 1);
4940 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4941 	/**********  exit soft reset  ***********/
4942 	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
4943 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4944 					SOFT_RESET_CP, 0);
4945 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4946 					SOFT_RESET_GFX, 0);
4947 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4948 					SOFT_RESET_CPF, 0);
4949 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4950 					SOFT_RESET_CPC, 0);
4951 	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
4952 					SOFT_RESET_CPG, 0);
4953 	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4954 
4955 	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4956 	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4957 	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4958 
4959 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4960 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4961 
4962 	for (i = 0; i < adev->usec_timeout; i++) {
4963 		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
4964 			break;
4965 		udelay(1);
4966 	}
4967 	if (i >= adev->usec_timeout) {
4968 		DRM_ERROR("Failed to wait for CP_VMID_RESET to clear\n");
4969 		return -EINVAL;
4970 	}
4971 
4972 	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4973 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4974 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4975 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4976 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4977 	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4978 
4979 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4980 
4981 	return gfx_v11_0_cp_resume(adev);
4982 }
4983 
4984 static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
4985 {
4986 	int i, r;
4987 	struct amdgpu_device *adev = ip_block->adev;
4988 	struct amdgpu_ring *ring;
4989 	long tmo = msecs_to_jiffies(1000);
4990 
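	/* Run an IB test on every gfx and compute ring; any failure indicates
	 * the engine is hung and a soft reset is needed.
	 */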
4991 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4992 		ring = &adev->gfx.gfx_ring[i];
4993 		r = amdgpu_ring_test_ib(ring, tmo);
4994 		if (r)
4995 			return true;
4996 	}
4997 
4998 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4999 		ring = &adev->gfx.compute_ring[i];
5000 		r = amdgpu_ring_test_ib(ring, tmo);
5001 		if (r)
5002 			return true;
5003 	}
5004 
5005 	return false;
5006 }
5007 
5008 static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
5009 {
5010 	struct amdgpu_device *adev = ip_block->adev;
5011 	/* A GFX soft reset also impacts MES, so MES needs to be resumed
5012 	 * whenever a GFX soft reset is performed.
5013 	 */
5014 	return amdgpu_mes_resume(adev);
5015 }
5016 
5017 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5018 {
5019 	uint64_t clock;
5020 	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
5021 
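	/* 64-bit counter read: sample HI, then LO, then HI again; if the high
	 * word changed in between, the low word wrapped and must be re-read.
	 */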
5022 	if (amdgpu_sriov_vf(adev)) {
5023 		amdgpu_gfx_off_ctrl(adev, false);
5024 		mutex_lock(&adev->gfx.gpu_clock_mutex);
5025 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5026 		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5027 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
5028 		if (clock_counter_hi_pre != clock_counter_hi_after)
5029 			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
5030 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
5031 		amdgpu_gfx_off_ctrl(adev, true);
5032 	} else {
5033 		preempt_disable();
5034 		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5035 		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5036 		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
5037 		if (clock_counter_hi_pre != clock_counter_hi_after)
5038 			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
5039 		preempt_enable();
5040 	}
5041 	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
5042 
5043 	return clock;
5044 }
5045 
5046 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5047 					   uint32_t vmid,
5048 					   uint32_t gds_base, uint32_t gds_size,
5049 					   uint32_t gws_base, uint32_t gws_size,
5050 					   uint32_t oa_base, uint32_t oa_size)
5051 {
5052 	struct amdgpu_device *adev = ring->adev;
5053 
5054 	/* GDS Base */
5055 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5056 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
5057 				    gds_base);
5058 
5059 	/* GDS Size */
5060 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5061 				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
5062 				    gds_size);
5063 
5064 	/* GWS */
5065 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5066 				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
5067 				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
5068 
5069 	/* OA */
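	/* (1 << (oa_base + oa_size)) - (1 << oa_base) builds a mask of
	 * oa_size consecutive OA bits starting at oa_base.
	 */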
5070 	gfx_v11_0_write_data_to_reg(ring, 0, false,
5071 				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
5072 				    (1 << (oa_size + oa_base)) - (1 << oa_base));
5073 }
5074 
5075 static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
5076 {
5077 	struct amdgpu_device *adev = ip_block->adev;
5078 
5079 	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
5080 
5081 	adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
5082 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5083 					  AMDGPU_MAX_COMPUTE_RINGS);
5084 
5085 	gfx_v11_0_set_kiq_pm4_funcs(adev);
5086 	gfx_v11_0_set_ring_funcs(adev);
5087 	gfx_v11_0_set_irq_funcs(adev);
5088 	gfx_v11_0_set_gds_init(adev);
5089 	gfx_v11_0_set_rlc_funcs(adev);
5090 	gfx_v11_0_set_mqd_funcs(adev);
5091 	gfx_v11_0_set_imu_funcs(adev);
5092 
5093 	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
5094 
5095 	return gfx_v11_0_init_microcode(adev);
5096 }
5097 
5098 static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
5099 {
5100 	struct amdgpu_device *adev = ip_block->adev;
5101 	int r;
5102 
5103 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5104 	if (r)
5105 		return r;
5106 
5107 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5108 	if (r)
5109 		return r;
5110 
5111 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
5112 	if (r)
5113 		return r;
5114 	return 0;
5115 }
5116 
5117 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
5118 {
5119 	uint32_t rlc_cntl;
5120 
5121 	/* if RLC is not enabled, do nothing */
5122 	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
5123 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
5124 }
5125 
5126 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
5127 {
5128 	uint32_t data;
5129 	unsigned i;
5130 
5131 	data = RLC_SAFE_MODE__CMD_MASK;
5132 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
5133 
5134 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
5135 
5136 	/* wait for the RLC to acknowledge the request (CMD field clears) */
5137 	for (i = 0; i < adev->usec_timeout; i++) {
5138 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
5139 				   RLC_SAFE_MODE, CMD))
5140 			break;
5141 		udelay(1);
5142 	}
5143 }
5144 
5145 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
5146 {
5147 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
5148 }
5149 
5150 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
5151 				      bool enable)
5152 {
5153 	uint32_t def, data;
5154 
5155 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
5156 		return;
5157 
5158 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5159 
5160 	if (enable)
5161 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5162 	else
5163 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
5164 
5165 	if (def != data)
5166 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5167 }
5168 
5169 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
5170 				       bool enable)
5171 {
5172 	uint32_t def, data;
5173 
5174 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
5175 		return;
5176 
5177 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5178 
5179 	if (enable)
5180 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5181 	else
5182 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
5183 
5184 	if (def != data)
5185 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5186 }
5187 
5188 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
5189 					   bool enable)
5190 {
5191 	uint32_t def, data;
5192 
5193 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
5194 		return;
5195 
5196 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5197 
5198 	if (enable)
5199 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5200 	else
5201 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
5202 
5203 	if (def != data)
5204 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5205 }
5206 
5207 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
5208 						       bool enable)
5209 {
5210 	uint32_t data, def;
5211 
5212 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
5213 		return;
5214 
5215 	/* It is disabled by HW by default */
5216 	if (enable) {
5217 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5218 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
5219 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5220 
5221 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5222 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5223 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5224 
5225 			if (def != data)
5226 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5227 		}
5228 	} else {
5229 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5230 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5231 
5232 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5233 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5234 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
5235 
5236 			if (def != data)
5237 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5238 		}
5239 	}
5240 }
5241 
5242 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5243 						       bool enable)
5244 {
5245 	uint32_t def, data;
5246 
5247 	if (!(adev->cg_flags &
5248 	      (AMD_CG_SUPPORT_GFX_CGCG |
5249 	      AMD_CG_SUPPORT_GFX_CGLS |
5250 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
5251 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
5252 		return;
5253 
5254 	if (enable) {
5255 		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5256 
5257 		/* unset CGCG override */
5258 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5259 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5260 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5261 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5262 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
5263 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5264 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5265 
5266 		/* update CGCG override bits */
5267 		if (def != data)
5268 			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5269 
5270 		/* enable cgcg FSM(0x0000363F) */
5271 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5272 
5273 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5274 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
5275 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5276 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5277 		}
5278 
5279 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5280 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
5281 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5282 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5283 		}
5284 
5285 		if (def != data)
5286 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5287 
5288 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5289 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5290 
5291 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5292 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
5293 			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5294 				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5295 		}
5296 
5297 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5298 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
5299 			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5300 				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5301 		}
5302 
5303 		if (def != data)
5304 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5305 
5306 		/* set IDLE_POLL_COUNT(0x00900100) */
5307 		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
5308 
5309 		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
5310 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5311 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5312 
5313 		if (def != data)
5314 			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
5315 
5316 		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5317 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5318 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5319 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5320 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5321 		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
5322 
5323 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5324 		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5325 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5326 
5327 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5328 		if (adev->sdma.num_instances > 1) {
5329 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5330 			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5331 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5332 		}
5333 	} else {
5334 		/* Program RLC_CGCG_CGLS_CTRL */
5335 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5336 
5337 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5338 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5339 
5340 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5341 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5342 
5343 		if (def != data)
5344 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5345 
5346 		/* Program RLC_CGCG_CGLS_CTRL_3D */
5347 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5348 
5349 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5350 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5351 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5352 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5353 
5354 		if (def != data)
5355 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5356 
5357 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5358 		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5359 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5360 
5361 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
5362 		if (adev->sdma.num_instances > 1) {
5363 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5364 			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5365 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
5366 		}
5367 	}
5368 }
5369 
5370 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5371 					    bool enable)
5372 {
5373 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5374 
5375 	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5376 
5377 	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5378 
5379 	gfx_v11_0_update_repeater_fgcg(adev, enable);
5380 
5381 	gfx_v11_0_update_sram_fgcg(adev, enable);
5382 
5383 	gfx_v11_0_update_perf_clk(adev, enable);
5384 
5385 	if (adev->cg_flags &
5386 	    (AMD_CG_SUPPORT_GFX_MGCG |
5387 	     AMD_CG_SUPPORT_GFX_CGLS |
5388 	     AMD_CG_SUPPORT_GFX_CGCG |
5389 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
5390 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
5391 		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
5392 
5393 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5394 
5395 	return 0;
5396 }
5397 
5398 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
5399 {
5400 	u32 reg, pre_data, data;
5401 
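	/* GFXOFF is disabled around the RLC_SPM_MC_CNTL access so the GC
	 * register read/write is not dropped while the GFX block is power
	 * gated.
	 */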
5402 	amdgpu_gfx_off_ctrl(adev, false);
5403 	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
5404 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
5405 		pre_data = RREG32_NO_KIQ(reg);
5406 	else
5407 		pre_data = RREG32(reg);
5408 
5409 	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
5410 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5411 
5412 	if (pre_data != data) {
5413 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
5414 			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
5415 		} else
5416 			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
5417 	}
5418 	amdgpu_gfx_off_ctrl(adev, true);
5419 
5420 	if (ring
5421 		&& amdgpu_sriov_is_pp_one_vf(adev)
5422 		&& (pre_data != data)
5423 		&& ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
5424 			|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
5425 		amdgpu_ring_emit_wreg(ring, reg, data);
5426 	}
5427 }
5428 
5429 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5430 	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5431 	.set_safe_mode = gfx_v11_0_set_safe_mode,
5432 	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
5433 	.init = gfx_v11_0_rlc_init,
5434 	.get_csb_size = gfx_v11_0_get_csb_size,
5435 	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
5436 	.resume = gfx_v11_0_rlc_resume,
5437 	.stop = gfx_v11_0_rlc_stop,
5438 	.reset = gfx_v11_0_rlc_reset,
5439 	.start = gfx_v11_0_rlc_start,
5440 	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
5441 };
5442 
5443 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5444 {
5445 	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5446 
5447 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5448 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5449 	else
5450 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5451 
5452 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
5453 
5454 	/* Program RLC_PG_DELAY_3 for CGPG hysteresis */
5455 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5456 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5457 		case IP_VERSION(11, 0, 1):
5458 		case IP_VERSION(11, 0, 4):
5459 		case IP_VERSION(11, 5, 0):
5460 		case IP_VERSION(11, 5, 1):
5461 		case IP_VERSION(11, 5, 2):
5462 		case IP_VERSION(11, 5, 3):
5463 			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
5464 			break;
5465 		default:
5466 			break;
5467 		}
5468 	}
5469 }
5470 
5471 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5472 {
5473 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5474 
5475 	gfx_v11_cntl_power_gating(adev, enable);
5476 
5477 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5478 }
5479 
5480 static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5481 					   enum amd_powergating_state state)
5482 {
5483 	struct amdgpu_device *adev = ip_block->adev;
5484 	bool enable = (state == AMD_PG_STATE_GATE);
5485 
5486 	if (amdgpu_sriov_vf(adev))
5487 		return 0;
5488 
5489 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5490 	case IP_VERSION(11, 0, 0):
5491 	case IP_VERSION(11, 0, 2):
5492 	case IP_VERSION(11, 0, 3):
5493 		amdgpu_gfx_off_ctrl(adev, enable);
5494 		break;
5495 	case IP_VERSION(11, 0, 1):
5496 	case IP_VERSION(11, 0, 4):
5497 	case IP_VERSION(11, 5, 0):
5498 	case IP_VERSION(11, 5, 1):
5499 	case IP_VERSION(11, 5, 2):
5500 	case IP_VERSION(11, 5, 3):
5501 		if (!enable)
5502 			amdgpu_gfx_off_ctrl(adev, false);
5503 
5504 		gfx_v11_cntl_pg(adev, enable);
5505 
5506 		if (enable)
5507 			amdgpu_gfx_off_ctrl(adev, true);
5508 
5509 		break;
5510 	default:
5511 		break;
5512 	}
5513 
5514 	return 0;
5515 }
5516 
5517 static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5518 					  enum amd_clockgating_state state)
5519 {
5520 	struct amdgpu_device *adev = ip_block->adev;
5521 
5522 	if (amdgpu_sriov_vf(adev))
5523 		return 0;
5524 
5525 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5526 	case IP_VERSION(11, 0, 0):
5527 	case IP_VERSION(11, 0, 1):
5528 	case IP_VERSION(11, 0, 2):
5529 	case IP_VERSION(11, 0, 3):
5530 	case IP_VERSION(11, 0, 4):
5531 	case IP_VERSION(11, 5, 0):
5532 	case IP_VERSION(11, 5, 1):
5533 	case IP_VERSION(11, 5, 2):
5534 	case IP_VERSION(11, 5, 3):
5535 		gfx_v11_0_update_gfx_clock_gating(adev,
5536 						  state == AMD_CG_STATE_GATE);
5537 		break;
5538 	default:
5539 		break;
5540 	}
5541 
5542 	return 0;
5543 }
5544 
5545 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5546 {
5547 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5548 	int data;
5549 
5550 	/* AMD_CG_SUPPORT_GFX_MGCG */
5551 	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5552 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5553 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5554 
5555 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
5556 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5557 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5558 
5559 	/* AMD_CG_SUPPORT_GFX_FGCG */
5560 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5561 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
5562 
5563 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
5564 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5565 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5566 
5567 	/* AMD_CG_SUPPORT_GFX_CGCG */
5568 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5569 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5570 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5571 
5572 	/* AMD_CG_SUPPORT_GFX_CGLS */
5573 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5574 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5575 
5576 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5577 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5578 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5579 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5580 
5581 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5582 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5583 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5584 }
5585 
5586 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5587 {
5588 	/* gfx11 is 32bit rptr */
5589 	return *(uint32_t *)ring->rptr_cpu_addr;
5590 }
5591 
5592 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5593 {
5594 	struct amdgpu_device *adev = ring->adev;
5595 	u64 wptr;
5596 
5597 	/* XXX check if swapping is necessary on BE */
5598 	if (ring->use_doorbell) {
5599 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5600 	} else {
5601 		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5602 		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5603 	}
5604 
5605 	return wptr;
5606 }
5607 
5608 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5609 {
5610 	struct amdgpu_device *adev = ring->adev;
5611 
5612 	if (ring->use_doorbell) {
5613 		/* XXX check if swapping is necessary on BE */
5614 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5615 			     ring->wptr);
5616 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5617 	} else {
5618 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5619 			     lower_32_bits(ring->wptr));
5620 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5621 			     upper_32_bits(ring->wptr));
5622 	}
5623 }
5624 
5625 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5626 {
5627 	/* gfx11 hardware is 32bit rptr */
5628 	return *(uint32_t *)ring->rptr_cpu_addr;
5629 }
5630 
5631 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5632 {
5633 	u64 wptr;
5634 
5635 	/* XXX check if swapping is necessary on BE */
5636 	if (ring->use_doorbell)
5637 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5638 	else
5639 		BUG();
5640 	return wptr;
5641 }
5642 
5643 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5644 {
5645 	struct amdgpu_device *adev = ring->adev;
5646 
5647 	/* XXX check if swapping is necessary on BE */
5648 	if (ring->use_doorbell) {
5649 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
5650 			     ring->wptr);
5651 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5652 	} else {
5653 		BUG(); /* only DOORBELL method supported on gfx11 now */
5654 	}
5655 }
5656 
5657 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5658 {
5659 	struct amdgpu_device *adev = ring->adev;
5660 	u32 ref_and_mask, reg_mem_engine;
5661 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5662 
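	/* Select the HDP flush done bit for this client: the CP2/CP6 ranges
	 * cover the MEC1/MEC2 compute pipes, CP0 covers the gfx ring.
	 */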
5663 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5664 		switch (ring->me) {
5665 		case 1:
5666 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5667 			break;
5668 		case 2:
5669 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5670 			break;
5671 		default:
5672 			return;
5673 		}
5674 		reg_mem_engine = 0;
5675 	} else {
5676 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
5677 		reg_mem_engine = 1; /* pfp */
5678 	}
5679 
5680 	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5681 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5682 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5683 			       ref_and_mask, ref_and_mask, 0x20);
5684 }
5685 
5686 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5687 				       struct amdgpu_job *job,
5688 				       struct amdgpu_ib *ib,
5689 				       uint32_t flags)
5690 {
5691 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5692 	u32 header, control = 0;
5693 
5694 	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5695 
5696 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5697 
5698 	control |= ib->length_dw | (vmid << 24);
5699 
5700 	if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5701 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5702 
5703 		if (flags & AMDGPU_IB_PREEMPTED)
5704 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
5705 
5706 		if (vmid)
5707 			gfx_v11_0_ring_emit_de_meta(ring,
5708 				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
5709 	}
5710 
5711 	if (ring->is_mes_queue)
5712 		/* inherit vmid from mqd */
5713 		control |= 0x400000;
5714 
5715 	amdgpu_ring_write(ring, header);
5716 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5717 	amdgpu_ring_write(ring,
5718 #ifdef __BIG_ENDIAN
5719 		(2 << 0) |
5720 #endif
5721 		lower_32_bits(ib->gpu_addr));
5722 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5723 	amdgpu_ring_write(ring, control);
5724 }
5725 
5726 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5727 					   struct amdgpu_job *job,
5728 					   struct amdgpu_ib *ib,
5729 					   uint32_t flags)
5730 {
5731 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5732 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5733 
5734 	if (ring->is_mes_queue)
5735 		/* inherit vmid from mqd */
5736 		control |= 0x40000000;
5737 
5738 	/* Currently there is a high likelihood of a wave ID mismatch
5739 	 * between ME and GDS, leading to a hw deadlock, because ME generates
5740 	 * different wave IDs than the GDS expects. This situation happens
5741 	 * randomly when at least 5 compute pipes use GDS ordered append.
5742 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5743 	 * Those are probably bugs somewhere else in the kernel driver.
5744 	 *
5745 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5746 	 * GDS to 0 for this ring (me/pipe).
5747 	 */
5748 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5749 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5750 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5751 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5752 	}
5753 
5754 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5755 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5756 	amdgpu_ring_write(ring,
5757 #ifdef __BIG_ENDIAN
5758 				(2 << 0) |
5759 #endif
5760 				lower_32_bits(ib->gpu_addr));
5761 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5762 	amdgpu_ring_write(ring, control);
5763 }
5764 
5765 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5766 				     u64 seq, unsigned flags)
5767 {
5768 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5769 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5770 
5771 	/* RELEASE_MEM - flush caches, send int */
5772 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5773 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5774 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5775 				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
5776 				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5777 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5778 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5779 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5780 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5781 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
5782 
5783 	/*
5784 	 * The address must be Qword aligned for a 64bit write and Dword
5785 	 * aligned when only the low 32 bits are written (data high discarded).
5786 	 */
5787 	if (write64bit)
5788 		BUG_ON(addr & 0x7);
5789 	else
5790 		BUG_ON(addr & 0x3);
5791 	amdgpu_ring_write(ring, lower_32_bits(addr));
5792 	amdgpu_ring_write(ring, upper_32_bits(addr));
5793 	amdgpu_ring_write(ring, lower_32_bits(seq));
5794 	amdgpu_ring_write(ring, upper_32_bits(seq));
5795 	amdgpu_ring_write(ring, ring->is_mes_queue ?
5796 			 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
5797 }
5798 
5799 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5800 {
5801 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5802 	uint32_t seq = ring->fence_drv.sync_seq;
5803 	uint64_t addr = ring->fence_drv.gpu_addr;
5804 
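	/* Have the CP wait until the ring's fence memory reaches the last
	 * synced sequence number before executing subsequent packets.
	 */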
5805 	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5806 			       upper_32_bits(addr), seq, 0xffffffff, 4);
5807 }
5808 
5809 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5810 				   uint16_t pasid, uint32_t flush_type,
5811 				   bool all_hub, uint8_t dst_sel)
5812 {
5813 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5814 	amdgpu_ring_write(ring,
5815 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5816 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5817 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5818 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5819 }
5820 
5821 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5822 					 unsigned vmid, uint64_t pd_addr)
5823 {
5824 	if (ring->is_mes_queue)
5825 		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
5826 	else
5827 		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5828 
5829 	/* compute doesn't have PFP */
5830 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5831 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5832 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5833 		amdgpu_ring_write(ring, 0x0);
5834 	}
5835 
5836 	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
5837 	 * changed in any way.
5838 	 */
5839 	ring->set_q_mode_offs = 0;
5840 	ring->set_q_mode_ptr = NULL;
5841 }
5842 
5843 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5844 					  u64 seq, unsigned int flags)
5845 {
5846 	struct amdgpu_device *adev = ring->adev;
5847 
5848 	/* we only allocate 32bit for each seq wb address */
5849 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5850 
5851 	/* write fence seq to the "addr" */
5852 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5853 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5854 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5855 	amdgpu_ring_write(ring, lower_32_bits(addr));
5856 	amdgpu_ring_write(ring, upper_32_bits(addr));
5857 	amdgpu_ring_write(ring, lower_32_bits(seq));
5858 
5859 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5860 		/* set register to trigger INT */
5861 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5862 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5863 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5864 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5865 		amdgpu_ring_write(ring, 0);
5866 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5867 	}
5868 }
5869 
5870 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
5871 					 uint32_t flags)
5872 {
5873 	uint32_t dw2 = 0;
5874 
5875 	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
5876 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5877 		/* set load_global_config & load_global_uconfig */
5878 		dw2 |= 0x8001;
5879 		/* set load_cs_sh_regs */
5880 		dw2 |= 0x01000000;
5881 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5882 		dw2 |= 0x10002;
5883 	}
5884 
5885 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5886 	amdgpu_ring_write(ring, dw2);
5887 	amdgpu_ring_write(ring, 0);
5888 }
5889 
5890 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
5891 						   uint64_t addr)
5892 {
5893 	unsigned ret;
5894 
5895 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5896 	amdgpu_ring_write(ring, lower_32_bits(addr));
5897 	amdgpu_ring_write(ring, upper_32_bits(addr));
5898 	/* discard following DWs if *cond_exec_gpu_addr==0 */
5899 	amdgpu_ring_write(ring, 0);
5900 	ret = ring->wptr & ring->buf_mask;
5901 	/* patch dummy value later */
5902 	amdgpu_ring_write(ring, 0);
5903 
5904 	return ret;
5905 }
5906 
5907 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
5908 					   u64 shadow_va, u64 csa_va,
5909 					   u64 gds_va, bool init_shadow,
5910 					   int vmid)
5911 {
5912 	struct amdgpu_device *adev = ring->adev;
5913 	unsigned int offs, end;
5914 
5915 	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
5916 		return;
5917 
5918 	/*
5919 	 * The logic here isn't easy to follow because state needs to be kept
5920 	 * across multiple executions of this function as well as between the
5921 	 * CPU and GPU. The general idea is that the newly written GPU command
5922 	 * has a condition on the previous one and is only executed if really
5923 	 * necessary.
5924 	 */
5925 
5926 	/*
5927 	 * The dw in the NOP controls whether the next SET_Q_MODE packet is
5928 	 * executed or not. Reserve 64 bits just to be on the safe side.
5929 	 */
5930 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
5931 	offs = ring->wptr & ring->buf_mask;
5932 
5933 	/*
5934 	 * We start with skipping the prefix SET_Q_MODE and always executing
5935 	 * the postfix SET_Q_MODE packet. This is changed below with a
5936 	 * WRITE_DATA command once the postfix has executed.
5937 	 */
5938 	amdgpu_ring_write(ring, shadow_va ? 1 : 0);
5939 	amdgpu_ring_write(ring, 0);
5940 
5941 	if (ring->set_q_mode_offs) {
5942 		uint64_t addr;
5943 
5944 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
5945 		addr += ring->set_q_mode_offs << 2;
5946 		end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr);
5947 	}
5948 
5949 	/*
5950 	 * When the postfix SET_Q_MODE packet executes we need to make sure that the
5951 	 * next prefix SET_Q_MODE packet executes as well.
5952 	 */
5953 	if (!shadow_va) {
5954 		uint64_t addr;
5955 
5956 		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
5957 		addr += offs << 2;
5958 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5959 		amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
5960 		amdgpu_ring_write(ring, lower_32_bits(addr));
5961 		amdgpu_ring_write(ring, upper_32_bits(addr));
5962 		amdgpu_ring_write(ring, 0x1);
5963 	}
5964 
5965 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
5966 	amdgpu_ring_write(ring, lower_32_bits(shadow_va));
5967 	amdgpu_ring_write(ring, upper_32_bits(shadow_va));
5968 	amdgpu_ring_write(ring, lower_32_bits(gds_va));
5969 	amdgpu_ring_write(ring, upper_32_bits(gds_va));
5970 	amdgpu_ring_write(ring, lower_32_bits(csa_va));
5971 	amdgpu_ring_write(ring, upper_32_bits(csa_va));
5972 	amdgpu_ring_write(ring, shadow_va ?
5973 			  PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
5974 	amdgpu_ring_write(ring, init_shadow ?
5975 			  PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
5976 
5977 	if (ring->set_q_mode_offs)
5978 		amdgpu_ring_patch_cond_exec(ring, end);
5979 
5980 	if (shadow_va) {
5981 		uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;
5982 
5983 		/*
5984 		 * If the tokens match, try to skip the last postfix SET_Q_MODE
5985 		 * packet to avoid saving/restoring the state all the time.
5986 		 */
5987 		if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
5988 			*ring->set_q_mode_ptr = 0;
5989 
5990 		ring->set_q_mode_token = token;
5991 	} else {
5992 		ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs];
5993 	}
5994 
5995 	ring->set_q_mode_offs = offs;
5996 }
5997 
5998 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5999 {
6000 	int i, r = 0;
6001 	struct amdgpu_device *adev = ring->adev;
6002 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
6003 	struct amdgpu_ring *kiq_ring = &kiq->ring;
6004 	unsigned long flags;
6005 
6006 	if (adev->enable_mes)
6007 		return -EINVAL;
6008 
6009 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
6010 		return -EINVAL;
6011 
6012 	spin_lock_irqsave(&kiq->ring_lock, flags);
6013 
6014 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
6015 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
6016 		return -ENOMEM;
6017 	}
6018 
6019 	/* assert preemption condition */
6020 	amdgpu_ring_set_preempt_cond_exec(ring, false);
6021 
6022 	/* assert IB preemption, emit the trailing fence */
6023 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
6024 				   ring->trail_fence_gpu_addr,
6025 				   ++ring->trail_seq);
6026 	amdgpu_ring_commit(kiq_ring);
6027 
6028 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
6029 
6030 	/* poll the trailing fence */
6031 	for (i = 0; i < adev->usec_timeout; i++) {
6032 		if (ring->trail_seq ==
6033 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
6034 			break;
6035 		udelay(1);
6036 	}
6037 
6038 	if (i >= adev->usec_timeout) {
6039 		r = -EINVAL;
6040 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
6041 	}
6042 
6043 	/* deassert preemption condition */
6044 	amdgpu_ring_set_preempt_cond_exec(ring, true);
6045 	return r;
6046 }
6047 
6048 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
6049 {
6050 	struct amdgpu_device *adev = ring->adev;
6051 	struct v10_de_ib_state de_payload = {0};
6052 	uint64_t offset, gds_addr, de_payload_gpu_addr;
6053 	void *de_payload_cpu_addr;
6054 	int cnt;
6055 
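	/* The DE metadata lives either in the MES context (MES queues) or in
	 * the per-ring CSA; locate it and stream the payload in with
	 * WRITE_DATA.
	 */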
6056 	if (ring->is_mes_queue) {
6057 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
6058 				  gfx[0].gfx_meta_data) +
6059 			offsetof(struct v10_gfx_meta_data, de_payload);
6060 		de_payload_gpu_addr =
6061 			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
6062 		de_payload_cpu_addr =
6063 			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
6064 
6065 		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
6066 				  gfx[0].gds_backup) +
6067 			offsetof(struct v10_gfx_meta_data, de_payload);
6068 		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
6069 	} else {
6070 		offset = offsetof(struct v10_gfx_meta_data, de_payload);
6071 		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
6072 		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
6073 
6074 		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
6075 				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
6076 				 PAGE_SIZE);
6077 	}
6078 
6079 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
6080 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
6081 
6082 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
6083 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
6084 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
6085 				 WRITE_DATA_DST_SEL(8) |
6086 				 WR_CONFIRM) |
6087 				 WRITE_DATA_CACHE_POLICY(0));
6088 	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
6089 	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
6090 
6091 	if (resume)
6092 		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
6093 					   sizeof(de_payload) >> 2);
6094 	else
6095 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
6096 					   sizeof(de_payload) >> 2);
6097 }
6098 
6099 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
6100 				    bool secure)
6101 {
6102 	uint32_t v = secure ? FRAME_TMZ : 0;
6103 
6104 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
6105 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
6106 }
6107 
6108 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6109 				     uint32_t reg_val_offs)
6110 {
6111 	struct amdgpu_device *adev = ring->adev;
6112 
6113 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6114 	amdgpu_ring_write(ring, 0 |	/* src: register*/
6115 				(5 << 8) |	/* dst: memory */
6116 				(1 << 20));	/* write confirm */
6117 	amdgpu_ring_write(ring, reg);
6118 	amdgpu_ring_write(ring, 0);
6119 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6120 				reg_val_offs * 4));
6121 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6122 				reg_val_offs * 4));
6123 }
6124 
6125 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6126 				   uint32_t val)
6127 {
6128 	uint32_t cmd = 0;
6129 
6130 	switch (ring->funcs->type) {
6131 	case AMDGPU_RING_TYPE_GFX:
6132 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6133 		break;
6134 	case AMDGPU_RING_TYPE_KIQ:
6135 		cmd = (1 << 16); /* no inc addr */
6136 		break;
6137 	default:
6138 		cmd = WR_CONFIRM;
6139 		break;
6140 	}
6141 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6142 	amdgpu_ring_write(ring, cmd);
6143 	amdgpu_ring_write(ring, reg);
6144 	amdgpu_ring_write(ring, 0);
6145 	amdgpu_ring_write(ring, val);
6146 }
6147 
6148 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
6149 					uint32_t val, uint32_t mask)
6150 {
6151 	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
6152 }
6153 
6154 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
6155 						   uint32_t reg0, uint32_t reg1,
6156 						   uint32_t ref, uint32_t mask)
6157 {
6158 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6159 
6160 	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
6161 			       ref, mask, 0x20);
6162 }
6163 
6164 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
6165 					 unsigned vmid)
6166 {
6167 	struct amdgpu_device *adev = ring->adev;
6168 	uint32_t value = 0;
6169 
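	/* Soft recovery: issue an SQ command scoped to the offending VMID so
	 * its hung waves are terminated without a full GPU reset.
	 */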
6170 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6171 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6172 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6173 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6174 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
6175 	WREG32_SOC15(GC, 0, regSQ_CMD, value);
6176 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
6177 }
6178 
6179 static void
6180 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6181 				      uint32_t me, uint32_t pipe,
6182 				      enum amdgpu_interrupt_state state)
6183 {
6184 	uint32_t cp_int_cntl, cp_int_cntl_reg;
6185 
6186 	if (!me) {
6187 		switch (pipe) {
6188 		case 0:
6189 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
6190 			break;
6191 		case 1:
6192 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
6193 			break;
6194 		default:
6195 			DRM_DEBUG("invalid pipe %d\n", pipe);
6196 			return;
6197 		}
6198 	} else {
6199 		DRM_DEBUG("invalid me %d\n", me);
6200 		return;
6201 	}
6202 
6203 	switch (state) {
6204 	case AMDGPU_IRQ_STATE_DISABLE:
6205 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6206 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6207 					    TIME_STAMP_INT_ENABLE, 0);
6208 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6209 					    GENERIC0_INT_ENABLE, 0);
6210 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6211 		break;
6212 	case AMDGPU_IRQ_STATE_ENABLE:
6213 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6214 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6215 					    TIME_STAMP_INT_ENABLE, 1);
6216 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6217 					    GENERIC0_INT_ENABLE, 1);
6218 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6219 		break;
6220 	default:
6221 		break;
6222 	}
6223 }
6224 
6225 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6226 						     int me, int pipe,
6227 						     enum amdgpu_interrupt_state state)
6228 {
6229 	u32 mec_int_cntl, mec_int_cntl_reg;
6230 
6231 	/*
6232 	 * amdgpu controls only the first MEC. That's why this function only
6233 	 * handles the setting of interrupts for this specific MEC. All other
6234 	 * pipes' interrupts are set by amdkfd.
6235 	 */
6236 
6237 	if (me == 1) {
6238 		switch (pipe) {
6239 		case 0:
6240 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6241 			break;
6242 		case 1:
6243 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
6244 			break;
6245 		case 2:
6246 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
6247 			break;
6248 		case 3:
6249 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
6250 			break;
6251 		default:
6252 			DRM_DEBUG("invalid pipe %d\n", pipe);
6253 			return;
6254 		}
6255 	} else {
6256 		DRM_DEBUG("invalid me %d\n", me);
6257 		return;
6258 	}
6259 
6260 	switch (state) {
6261 	case AMDGPU_IRQ_STATE_DISABLE:
6262 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6263 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6264 					     TIME_STAMP_INT_ENABLE, 0);
6265 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6266 					     GENERIC0_INT_ENABLE, 0);
6267 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6268 		break;
6269 	case AMDGPU_IRQ_STATE_ENABLE:
6270 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6271 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6272 					     TIME_STAMP_INT_ENABLE, 1);
6273 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6274 					     GENERIC0_INT_ENABLE, 1);
6275 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6276 		break;
6277 	default:
6278 		break;
6279 	}
6280 }
6281 
6282 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6283 					    struct amdgpu_irq_src *src,
6284 					    unsigned type,
6285 					    enum amdgpu_interrupt_state state)
6286 {
6287 	switch (type) {
6288 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6289 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
6290 		break;
6291 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
6292 		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
6293 		break;
6294 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6295 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6296 		break;
6297 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6298 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6299 		break;
6300 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6301 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6302 		break;
6303 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6304 		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6305 		break;
6306 	default:
6307 		break;
6308 	}
6309 	return 0;
6310 }
6311 
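/*
 * EOP interrupt handler: for MES-managed queues, look up the ring by the
 * queue id carried in src_data[0]; otherwise decode me/pipe/queue from
 * ring_id and run fence processing on the matching gfx or compute ring.
 */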
6312 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
6313 			     struct amdgpu_irq_src *source,
6314 			     struct amdgpu_iv_entry *entry)
6315 {
6316 	int i;
6317 	u8 me_id, pipe_id, queue_id;
6318 	struct amdgpu_ring *ring;
6319 	uint32_t mes_queue_id = entry->src_data[0];
6320 
6321 	DRM_DEBUG("IH: CP EOP\n");
6322 
6323 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
6324 		struct amdgpu_mes_queue *queue;
6325 
6326 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
6327 
6328 		spin_lock(&adev->mes.queue_id_lock);
6329 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
6330 		if (queue) {
6331 			DRM_DEBUG("process mes queue id = %u\n", mes_queue_id);
6332 			amdgpu_fence_process(queue->ring);
6333 		}
6334 		spin_unlock(&adev->mes.queue_id_lock);
6335 	} else {
6336 		me_id = (entry->ring_id & 0x0c) >> 2;
6337 		pipe_id = (entry->ring_id & 0x03) >> 0;
6338 		queue_id = (entry->ring_id & 0x70) >> 4;
6339 
6340 		switch (me_id) {
6341 		case 0:
6342 			if (pipe_id == 0)
6343 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6344 			else
6345 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
6346 			break;
6347 		case 1:
6348 		case 2:
6349 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6350 				ring = &adev->gfx.compute_ring[i];
6351 				/* Per-queue interrupts are supported on the MEC starting
6352 				 * with VI, but they can only be enabled or disabled per
6353 				 * pipe, not per queue.
6354 				 */
6355 				if ((ring->me == me_id) &&
6356 				    (ring->pipe == pipe_id) &&
6357 				    (ring->queue == queue_id))
6358 					amdgpu_fence_process(ring);
6359 			}
6360 			break;
6361 		}
6362 	}
6363 
6364 	return 0;
6365 }
6366 
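/*
 * Toggle PRIV_REG_INT_ENABLE on every gfx (CPG) and compute (CPC) pipe
 * interrupt control register.
 */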
6367 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6368 					      struct amdgpu_irq_src *source,
6369 					      unsigned int type,
6370 					      enum amdgpu_interrupt_state state)
6371 {
6372 	u32 cp_int_cntl_reg, cp_int_cntl;
6373 	int i, j;
6374 
6375 	switch (state) {
6376 	case AMDGPU_IRQ_STATE_DISABLE:
6377 	case AMDGPU_IRQ_STATE_ENABLE:
6378 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6379 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6380 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6381 
6382 				if (cp_int_cntl_reg) {
6383 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6384 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6385 								    PRIV_REG_INT_ENABLE,
6386 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6387 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6388 				}
6389 			}
6390 		}
6391 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6392 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6393 				/* MECs start at 1 */
6394 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6395 
6396 				if (cp_int_cntl_reg) {
6397 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6398 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6399 								    PRIV_REG_INT_ENABLE,
6400 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6401 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6402 				}
6403 			}
6404 		}
6405 		break;
6406 	default:
6407 		break;
6408 	}
6409 
6410 	return 0;
6411 }
6412 
6413 static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6414 					    struct amdgpu_irq_src *source,
6415 					    unsigned type,
6416 					    enum amdgpu_interrupt_state state)
6417 {
6418 	u32 cp_int_cntl_reg, cp_int_cntl;
6419 	int i, j;
6420 
6421 	switch (state) {
6422 	case AMDGPU_IRQ_STATE_DISABLE:
6423 	case AMDGPU_IRQ_STATE_ENABLE:
6424 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6425 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6426 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6427 
6428 				if (cp_int_cntl_reg) {
6429 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6430 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6431 								    OPCODE_ERROR_INT_ENABLE,
6432 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6433 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6434 				}
6435 			}
6436 		}
6437 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6438 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6439 				/* MECs start at 1 */
6440 				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);
6441 
6442 				if (cp_int_cntl_reg) {
6443 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6444 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6445 								    OPCODE_ERROR_INT_ENABLE,
6446 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6447 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6448 				}
6449 			}
6450 		}
6451 		break;
6452 	default:
6453 		break;
6454 	}
6455 	return 0;
6456 }
6457 
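/*
 * Toggle PRIV_INSTR_INT_ENABLE on the gfx (CPG) pipe interrupt control
 * registers; compute pipes are not touched here.
 */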
6458 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6459 					       struct amdgpu_irq_src *source,
6460 					       unsigned int type,
6461 					       enum amdgpu_interrupt_state state)
6462 {
6463 	u32 cp_int_cntl_reg, cp_int_cntl;
6464 	int i, j;
6465 
6466 	switch (state) {
6467 	case AMDGPU_IRQ_STATE_DISABLE:
6468 	case AMDGPU_IRQ_STATE_ENABLE:
6469 		for (i = 0; i < adev->gfx.me.num_me; i++) {
6470 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6471 				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
6472 
6473 				if (cp_int_cntl_reg) {
6474 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6475 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6476 								    PRIV_INSTR_INT_ENABLE,
6477 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6478 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6479 				}
6480 			}
6481 		}
6482 		break;
6483 	default:
6484 		break;
6485 	}
6486 
6487 	return 0;
6488 }
6489 
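/*
 * Decode me/pipe/queue from the IV entry and report a scheduler fault on
 * the matching gfx or compute ring.
 */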
6490 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
6491 					struct amdgpu_iv_entry *entry)
6492 {
6493 	u8 me_id, pipe_id, queue_id;
6494 	struct amdgpu_ring *ring;
6495 	int i;
6496 
6497 	me_id = (entry->ring_id & 0x0c) >> 2;
6498 	pipe_id = (entry->ring_id & 0x03) >> 0;
6499 	queue_id = (entry->ring_id & 0x70) >> 4;
6500 
6501 	switch (me_id) {
6502 	case 0:
6503 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
6504 			ring = &adev->gfx.gfx_ring[i];
6505 			if (ring->me == me_id && ring->pipe == pipe_id &&
6506 			    ring->queue == queue_id)
6507 				drm_sched_fault(&ring->sched);
6508 		}
6509 		break;
6510 	case 1:
6511 	case 2:
6512 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6513 			ring = &adev->gfx.compute_ring[i];
6514 			if (ring->me == me_id && ring->pipe == pipe_id &&
6515 			    ring->queue == queue_id)
6516 				drm_sched_fault(&ring->sched);
6517 		}
6518 		break;
6519 	default:
6520 		BUG();
6521 		break;
6522 	}
6523 }
6524 
6525 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
6526 				  struct amdgpu_irq_src *source,
6527 				  struct amdgpu_iv_entry *entry)
6528 {
6529 	DRM_ERROR("Illegal register access in command stream\n");
6530 	gfx_v11_0_handle_priv_fault(adev, entry);
6531 	return 0;
6532 }
6533 
6534 static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
6535 				struct amdgpu_irq_src *source,
6536 				struct amdgpu_iv_entry *entry)
6537 {
6538 	DRM_ERROR("Illegal opcode in command stream\n");
6539 	gfx_v11_0_handle_priv_fault(adev, entry);
6540 	return 0;
6541 }
6542 
6543 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
6544 				   struct amdgpu_irq_src *source,
6545 				   struct amdgpu_iv_entry *entry)
6546 {
6547 	DRM_ERROR("Illegal instruction in command stream\n");
6548 	gfx_v11_0_handle_priv_fault(adev, entry);
6549 	return 0;
6550 }
6551 
6552 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
6553 				  struct amdgpu_irq_src *source,
6554 				  struct amdgpu_iv_entry *entry)
6555 {
6556 	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
6557 		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
6558 
6559 	return 0;
6560 }
6561 
6562 #if 0
6563 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6564 					     struct amdgpu_irq_src *src,
6565 					     unsigned int type,
6566 					     enum amdgpu_interrupt_state state)
6567 {
6568 	uint32_t tmp, target;
6569 	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
6570 
6571 	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6572 	target += ring->pipe;
6573 
6574 	switch (type) {
6575 	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
6576 		if (state == AMDGPU_IRQ_STATE_DISABLE) {
6577 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6578 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6579 					    GENERIC2_INT_ENABLE, 0);
6580 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6581 
6582 			tmp = RREG32_SOC15_IP(GC, target);
6583 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6584 					    GENERIC2_INT_ENABLE, 0);
6585 			WREG32_SOC15_IP(GC, target, tmp);
6586 		} else {
6587 			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6588 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6589 					    GENERIC2_INT_ENABLE, 1);
6590 			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6591 
6592 			tmp = RREG32_SOC15_IP(GC, target);
6593 			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6594 					    GENERIC2_INT_ENABLE, 1);
6595 			WREG32_SOC15_IP(GC, target, tmp);
6596 		}
6597 		break;
6598 	default:
6599 		BUG(); /* kiq only supports GENERIC2_INT now */
6600 		break;
6601 	}
6602 	return 0;
6603 }
6604 #endif
6605 
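/*
 * Emit an ACQUIRE_MEM packet that writes back and invalidates the GL2,
 * GLM, GL1, GLV, GLK and GLI caches over the full address range.
 */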
6606 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
6607 {
6608 	const unsigned int gcr_cntl =
6609 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
6610 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
6611 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
6612 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
6613 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
6614 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
6615 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
6616 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
6617 
6618 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6619 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
6620 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
6621 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6622 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6623 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6624 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6625 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6626 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
6627 }
6628 
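/*
 * Reset a kernel gfx queue through MES, reinitialize its MQD, remap the
 * queue and verify it with a ring test. Not supported under SR-IOV.
 */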
6629 static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
6630 {
6631 	struct amdgpu_device *adev = ring->adev;
6632 	int r;
6633 
6634 	if (amdgpu_sriov_vf(adev))
6635 		return -EINVAL;
6636 
6637 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
6638 	if (r)
6639 		return r;
6640 
6641 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
6642 	if (unlikely(r != 0)) {
6643 		dev_err(adev->dev, "failed to reserve mqd_obj\n");
6644 		return r;
6645 	}
6646 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
6647 	if (!r) {
6648 		r = gfx_v11_0_kgq_init_queue(ring, true);
6649 		amdgpu_bo_kunmap(ring->mqd_obj);
6650 		ring->mqd_ptr = NULL;
6651 	}
6652 	amdgpu_bo_unreserve(ring->mqd_obj);
6653 	if (r) {
6654 		dev_err(adev->dev, "failed to map or init kgq mqd\n");
6655 		return r;
6656 	}
6657 
6658 	r = amdgpu_mes_map_legacy_queue(adev, ring);
6659 	if (r) {
6660 		dev_err(adev->dev, "failed to remap kgq\n");
6661 		return r;
6662 	}
6663 
6664 	return amdgpu_ring_test_ring(ring);
6665 }
6666 
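/*
 * Reset a kernel compute queue through MES, reinitialize its MQD, remap
 * the queue and verify it with a ring test. Not supported under SR-IOV.
 */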
6667 static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
6668 {
6669 	struct amdgpu_device *adev = ring->adev;
6670 	int r = 0;
6671 
6672 	if (amdgpu_sriov_vf(adev))
6673 		return -EINVAL;
6674 
6675 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
6676 	if (r) {
6677 		dev_err(adev->dev, "reset via MMIO failed %d\n", r);
6678 		return r;
6679 	}
6680 
6681 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
6682 	if (unlikely(r != 0)) {
6683 		dev_err(adev->dev, "failed to reserve mqd_obj\n");
6684 		return r;
6685 	}
6686 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
6687 	if (!r) {
6688 		r = gfx_v11_0_kcq_init_queue(ring, true);
6689 		amdgpu_bo_kunmap(ring->mqd_obj);
6690 		ring->mqd_ptr = NULL;
6691 	}
6692 	amdgpu_bo_unreserve(ring->mqd_obj);
6693 	if (r) {
6694 		dev_err(adev->dev, "failed to map or init kcq mqd\n");
6695 		return r;
6696 	}
6697 	r = amdgpu_mes_map_legacy_queue(adev, ring);
6698 	if (r) {
6699 		dev_err(adev->dev, "failed to remap kcq\n");
6700 		return r;
6701 	}
6702 
6703 	return amdgpu_ring_test_ring(ring);
6704 }
6705 
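/*
 * Print the core, per-compute-queue and per-gfx-queue register values
 * captured by gfx_v11_ip_dump() through the drm printer.
 */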
6706 static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
6707 {
6708 	struct amdgpu_device *adev = ip_block->adev;
6709 	uint32_t i, j, k, reg, index = 0;
6710 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
6711 
6712 	if (!adev->gfx.ip_dump_core)
6713 		return;
6714 
6715 	for (i = 0; i < reg_count; i++)
6716 		drm_printf(p, "%-50s \t 0x%08x\n",
6717 			   gc_reg_list_11_0[i].reg_name,
6718 			   adev->gfx.ip_dump_core[i]);
6719 
6720 	/* print compute queue registers for all instances */
6721 	if (!adev->gfx.ip_dump_compute_queues)
6722 		return;
6723 
6724 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
6725 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
6726 		   adev->gfx.mec.num_mec,
6727 		   adev->gfx.mec.num_pipe_per_mec,
6728 		   adev->gfx.mec.num_queue_per_pipe);
6729 
6730 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6731 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6732 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
6733 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
6734 				for (reg = 0; reg < reg_count; reg++) {
6735 					drm_printf(p, "%-50s \t 0x%08x\n",
6736 						   gc_cp_reg_list_11[reg].reg_name,
6737 						   adev->gfx.ip_dump_compute_queues[index + reg]);
6738 				}
6739 				index += reg_count;
6740 			}
6741 		}
6742 	}
6743 
6744 	/* print gfx queue registers for all instances */
6745 	if (!adev->gfx.ip_dump_gfx_queues)
6746 		return;
6747 
6748 	index = 0;
6749 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
6750 	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
6751 		   adev->gfx.me.num_me,
6752 		   adev->gfx.me.num_pipe_per_me,
6753 		   adev->gfx.me.num_queue_per_pipe);
6754 
6755 	for (i = 0; i < adev->gfx.me.num_me; i++) {
6756 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6757 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
6758 				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
6759 				for (reg = 0; reg < reg_count; reg++) {
6760 					drm_printf(p, "%-50s \t 0x%08x\n",
6761 						   gc_gfx_queue_reg_list_11[reg].reg_name,
6762 						   adev->gfx.ip_dump_gfx_queues[index + reg]);
6763 				}
6764 				index += reg_count;
6765 			}
6766 		}
6767 	}
6768 }
6769 
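/*
 * Snapshot the core, compute-queue and gfx-queue registers with GFXOFF
 * disabled, selecting each me/pipe/queue via GRBM before reading.
 */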
6770 static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
6771 {
6772 	struct amdgpu_device *adev = ip_block->adev;
6773 	uint32_t i, j, k, reg, index = 0;
6774 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
6775 
6776 	if (!adev->gfx.ip_dump_core)
6777 		return;
6778 
6779 	amdgpu_gfx_off_ctrl(adev, false);
6780 	for (i = 0; i < reg_count; i++)
6781 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i]));
6782 	amdgpu_gfx_off_ctrl(adev, true);
6783 
6784 	/* dump compute queue registers for all instances */
6785 	if (!adev->gfx.ip_dump_compute_queues)
6786 		return;
6787 
6788 	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
6789 	amdgpu_gfx_off_ctrl(adev, false);
6790 	mutex_lock(&adev->srbm_mutex);
6791 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6792 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6793 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
6794 				/* ME0 is for GFX, so compute MEC pipes are selected starting after the gfx MEs */
6795 				soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
6796 				for (reg = 0; reg < reg_count; reg++) {
6797 					adev->gfx.ip_dump_compute_queues[index + reg] =
6798 						RREG32(SOC15_REG_ENTRY_OFFSET(
6799 							gc_cp_reg_list_11[reg]));
6800 				}
6801 				index += reg_count;
6802 			}
6803 		}
6804 	}
6805 	soc21_grbm_select(adev, 0, 0, 0, 0);
6806 	mutex_unlock(&adev->srbm_mutex);
6807 	amdgpu_gfx_off_ctrl(adev, true);
6808 
6809 	/* dump gfx queue registers for all instances */
6810 	if (!adev->gfx.ip_dump_gfx_queues)
6811 		return;
6812 
6813 	index = 0;
6814 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
6815 	amdgpu_gfx_off_ctrl(adev, false);
6816 	mutex_lock(&adev->srbm_mutex);
6817 	for (i = 0; i < adev->gfx.me.num_me; i++) {
6818 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
6819 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
6820 				soc21_grbm_select(adev, i, j, k, 0);
6821 
6822 				for (reg = 0; reg < reg_count; reg++) {
6823 					adev->gfx.ip_dump_gfx_queues[index + reg] =
6824 						RREG32(SOC15_REG_ENTRY_OFFSET(
6825 							gc_gfx_queue_reg_list_11[reg]));
6826 				}
6827 				index += reg_count;
6828 			}
6829 		}
6830 	}
6831 	soc21_grbm_select(adev, 0, 0, 0, 0);
6832 	mutex_unlock(&adev->srbm_mutex);
6833 	amdgpu_gfx_off_ctrl(adev, true);
6834 }
6835 
6836 static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
6837 {
6838 	/* Emit the cleaner shader */
6839 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
6840 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
6841 }
6842 
6843 static void gfx_v11_0_ring_begin_use(struct amdgpu_ring *ring)
6844 {
6845 	amdgpu_gfx_profile_ring_begin_use(ring);
6846 
6847 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
6848 }
6849 
6850 static void gfx_v11_0_ring_end_use(struct amdgpu_ring *ring)
6851 {
6852 	amdgpu_gfx_profile_ring_end_use(ring);
6853 
6854 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
6855 }
6856 
6857 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
6858 	.name = "gfx_v11_0",
6859 	.early_init = gfx_v11_0_early_init,
6860 	.late_init = gfx_v11_0_late_init,
6861 	.sw_init = gfx_v11_0_sw_init,
6862 	.sw_fini = gfx_v11_0_sw_fini,
6863 	.hw_init = gfx_v11_0_hw_init,
6864 	.hw_fini = gfx_v11_0_hw_fini,
6865 	.suspend = gfx_v11_0_suspend,
6866 	.resume = gfx_v11_0_resume,
6867 	.is_idle = gfx_v11_0_is_idle,
6868 	.wait_for_idle = gfx_v11_0_wait_for_idle,
6869 	.soft_reset = gfx_v11_0_soft_reset,
6870 	.check_soft_reset = gfx_v11_0_check_soft_reset,
6871 	.post_soft_reset = gfx_v11_0_post_soft_reset,
6872 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
6873 	.set_powergating_state = gfx_v11_0_set_powergating_state,
6874 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
6875 	.dump_ip_state = gfx_v11_ip_dump,
6876 	.print_ip_state = gfx_v11_ip_print,
6877 };
6878 
6879 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
6880 	.type = AMDGPU_RING_TYPE_GFX,
6881 	.align_mask = 0xff,
6882 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6883 	.support_64bit_ptrs = true,
6884 	.secure_submission_supported = true,
6885 	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
6886 	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
6887 	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
6888 	.emit_frame_size = /* 247 dwords maximum if 16 IBs */
6889 		5 + /* update_spm_vmid */
6890 		5 + /* COND_EXEC */
6891 		22 + /* SET_Q_PREEMPTION_MODE */
6892 		7 + /* PIPELINE_SYNC */
6893 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6894 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6895 		4 + /* VM_FLUSH */
6896 		8 + /* FENCE for VM_FLUSH */
6897 		20 + /* GDS switch */
6898 		5 + /* COND_EXEC */
6899 		7 + /* HDP_flush */
6900 		4 + /* VGT_flush */
6901 		31 + /* DE_META */
6902 		3 + /* CNTX_CTRL */
6903 		5 + /* HDP_INVL */
6904 		22 + /* SET_Q_PREEMPTION_MODE */
6905 		8 + 8 + /* FENCE x2 */
6906 		8 + /* gfx_v11_0_emit_mem_sync */
6907 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
6908 	.emit_ib_size =	4, /* gfx_v11_0_ring_emit_ib_gfx */
6909 	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
6910 	.emit_fence = gfx_v11_0_ring_emit_fence,
6911 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6912 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6913 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6914 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6915 	.test_ring = gfx_v11_0_ring_test_ring,
6916 	.test_ib = gfx_v11_0_ring_test_ib,
6917 	.insert_nop = gfx_v11_ring_insert_nop,
6918 	.pad_ib = amdgpu_ring_generic_pad_ib,
6919 	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
6920 	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
6921 	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
6922 	.preempt_ib = gfx_v11_0_ring_preempt_ib,
6923 	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
6924 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
6925 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6926 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6927 	.soft_recovery = gfx_v11_0_ring_soft_recovery,
6928 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
6929 	.reset = gfx_v11_0_reset_kgq,
6930 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
6931 	.begin_use = gfx_v11_0_ring_begin_use,
6932 	.end_use = gfx_v11_0_ring_end_use,
6933 };
6934 
6935 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
6936 	.type = AMDGPU_RING_TYPE_COMPUTE,
6937 	.align_mask = 0xff,
6938 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6939 	.support_64bit_ptrs = true,
6940 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
6941 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
6942 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
6943 	.emit_frame_size =
6944 		5 + /* update_spm_vmid */
6945 		20 + /* gfx_v11_0_ring_emit_gds_switch */
6946 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
6947 		5 + /* hdp invalidate */
6948 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6949 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6950 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6951 		2 + /* gfx_v11_0_ring_emit_vm_flush */
6952 		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
6953 		8 + /* gfx_v11_0_emit_mem_sync */
6954 		2, /* gfx_v11_0_ring_emit_cleaner_shader */
6955 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
6956 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
6957 	.emit_fence = gfx_v11_0_ring_emit_fence,
6958 	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6959 	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6960 	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6961 	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6962 	.test_ring = gfx_v11_0_ring_test_ring,
6963 	.test_ib = gfx_v11_0_ring_test_ib,
6964 	.insert_nop = gfx_v11_ring_insert_nop,
6965 	.pad_ib = amdgpu_ring_generic_pad_ib,
6966 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
6967 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6968 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6969 	.soft_recovery = gfx_v11_0_ring_soft_recovery,
6970 	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
6971 	.reset = gfx_v11_0_reset_kcq,
6972 	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
6973 	.begin_use = gfx_v11_0_ring_begin_use,
6974 	.end_use = gfx_v11_0_ring_end_use,
6975 };
6976 
6977 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
6978 	.type = AMDGPU_RING_TYPE_KIQ,
6979 	.align_mask = 0xff,
6980 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6981 	.support_64bit_ptrs = true,
6982 	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
6983 	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
6984 	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
6985 	.emit_frame_size =
6986 		20 + /* gfx_v11_0_ring_emit_gds_switch */
6987 		7 + /* gfx_v11_0_ring_emit_hdp_flush */
6988 		5 + /* hdp invalidate */
6989 		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6990 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6991 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6992 		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6993 	.emit_ib_size =	7, /* gfx_v11_0_ring_emit_ib_compute */
6994 	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
6995 	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
6996 	.test_ring = gfx_v11_0_ring_test_ring,
6997 	.test_ib = gfx_v11_0_ring_test_ib,
6998 	.insert_nop = amdgpu_ring_insert_nop,
6999 	.pad_ib = amdgpu_ring_generic_pad_ib,
7000 	.emit_rreg = gfx_v11_0_ring_emit_rreg,
7001 	.emit_wreg = gfx_v11_0_ring_emit_wreg,
7002 	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
7003 	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
7004 };
7005 
7006 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
7007 {
7008 	int i;
7009 
7010 	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
7011 
7012 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7013 		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
7014 
7015 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
7016 		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
7017 }
7018 
7019 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
7020 	.set = gfx_v11_0_set_eop_interrupt_state,
7021 	.process = gfx_v11_0_eop_irq,
7022 };
7023 
7024 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
7025 	.set = gfx_v11_0_set_priv_reg_fault_state,
7026 	.process = gfx_v11_0_priv_reg_irq,
7027 };
7028 
7029 static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
7030 	.set = gfx_v11_0_set_bad_op_fault_state,
7031 	.process = gfx_v11_0_bad_op_irq,
7032 };
7033 
7034 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
7035 	.set = gfx_v11_0_set_priv_inst_fault_state,
7036 	.process = gfx_v11_0_priv_inst_irq,
7037 };
7038 
7039 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
7040 	.process = gfx_v11_0_rlc_gc_fed_irq,
7041 };
7042 
7043 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
7044 {
7045 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7046 	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;
7047 
7048 	adev->gfx.priv_reg_irq.num_types = 1;
7049 	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
7050 
7051 	adev->gfx.bad_op_irq.num_types = 1;
7052 	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;
7053 
7054 	adev->gfx.priv_inst_irq.num_types = 1;
7055 	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
7056 
7057 	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
7058 	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
7060 }
7061 
7062 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
7063 {
7064 	if (adev->flags & AMD_IS_APU)
7065 		adev->gfx.imu.mode = MISSION_MODE;
7066 	else
7067 		adev->gfx.imu.mode = DEBUG_MODE;
7068 
7069 	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
7070 }
7071 
7072 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
7073 {
7074 	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
7075 }
7076 
7077 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
7078 {
7079 	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
7080 			    adev->gfx.config.max_sh_per_se *
7081 			    adev->gfx.config.max_shader_engines;
7082 
7083 	adev->gds.gds_size = 0x1000;
7084 	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
7085 	adev->gds.gws_size = 64;
7086 	adev->gds.oa_size = 16;
7087 }
7088 
7089 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
7090 {
7091 	/* set gfx eng mqd */
7092 	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
7093 		sizeof(struct v11_gfx_mqd);
7094 	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
7095 		gfx_v11_0_gfx_mqd_init;
7096 	/* set compute eng mqd */
7097 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
7098 		sizeof(struct v11_compute_mqd);
7099 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
7100 		gfx_v11_0_compute_mqd_init;
7101 }
7102 
7103 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
7104 							  u32 bitmap)
7105 {
7106 	u32 data;
7107 
7108 	if (!bitmap)
7109 		return;
7110 
7111 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7112 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7113 
7114 	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
7115 }
7116 
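/*
 * Combine the inactive-WGP masks from CC_GC_SHADER_ARRAY_CONFIG and
 * GC_USER_SHADER_ARRAY_CONFIG and return the bitmap of active WGPs for
 * the currently selected SA.
 */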
7117 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
7118 {
7119 	u32 data, wgp_bitmask;
7120 	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
7121 	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
7122 
7123 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
7124 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
7125 
7126 	wgp_bitmask =
7127 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
7128 
7129 	return (~data) & wgp_bitmask;
7130 }
7131 
7132 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
7133 {
7134 	u32 wgp_idx, wgp_active_bitmap;
7135 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
7136 
7137 	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
7138 	cu_active_bitmap = 0;
7139 
7140 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
7141 		/* if there is one WGP enabled, it means 2 CUs will be enabled */
7142 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
7143 		if (wgp_active_bitmap & (1 << wgp_idx))
7144 			cu_active_bitmap |= cu_bitmap_per_wgp;
7145 	}
7146 
7147 	return cu_active_bitmap;
7148 }
7149 
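/*
 * Walk every SE/SA, read back the active CU bitmap for each and fill
 * cu_info with the per-SA bitmaps and the total active CU count.
 */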
7150 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
7151 				 struct amdgpu_cu_info *cu_info)
7152 {
7153 	int i, j, k, counter, active_cu_number = 0;
7154 	u32 mask, bitmap;
7155 	unsigned disable_masks[8 * 2];
7156 
7157 	if (!adev || !cu_info)
7158 		return -EINVAL;
7159 
7160 	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
7161 
7162 	mutex_lock(&adev->grbm_idx_mutex);
7163 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7164 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7165 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
7166 			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
7167 				continue;
7168 			mask = 1;
7169 			counter = 0;
7170 			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
7171 			if (i < 8 && j < 2)
7172 				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
7173 					adev, disable_masks[i * 2 + j]);
7174 			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);
7175 
7176 			/*
7177 			 * GFX11 could support more than 4 SEs, while the bitmap
7178 			 * in cu_info struct is 4x4 and ioctl interface struct
7179 			 * drm_amdgpu_info_device should keep stable.
7180 			 * So we use last two columns of bitmap to store cu mask for
7181 			 * SEs 4 to 7, the layout of the bitmap is as below:
7182 			 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
7183 			 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
7184 			 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
7185 			 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
7186 			 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
7187 			 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
7188 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
7189 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
7190 			 */
7191 			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
7192 
7193 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
7194 				if (bitmap & mask)
7195 					counter++;
7196 
7197 				mask <<= 1;
7198 			}
7199 			active_cu_number += counter;
7200 		}
7201 	}
7202 	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7203 	mutex_unlock(&adev->grbm_idx_mutex);
7204 
7205 	cu_info->number = active_cu_number;
7206 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7207 
7208 	return 0;
7209 }
7210 
7211 const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
7212 {
7213 	.type = AMD_IP_BLOCK_TYPE_GFX,
7214 	.major = 11,
7215 	.minor = 0,
7216 	.rev = 0,
7217 	.funcs = &gfx_v11_0_ip_funcs,
7218 };
7219