xref: /linux/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c (revision 5946dbe1c802efef3b12a4eecab1471f725f4ca9)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/delay.h>
24 #include <linux/kernel.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include "amdgpu.h"
29 #include "amdgpu_gfx.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_smu.h"
32 #include "imu_v12_0.h"
33 #include "soc24.h"
34 #include "nvd.h"
35 
36 #include "gc/gc_12_0_0_offset.h"
37 #include "gc/gc_12_0_0_sh_mask.h"
38 #include "soc24_enum.h"
39 #include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
40 
41 #include "soc15.h"
42 #include "clearstate_gfx12.h"
43 #include "v12_structs.h"
44 #include "gfx_v12_0.h"
45 #include "nbif_v6_3_1.h"
46 #include "mes_v12_0.h"
47 #include "mes_userqueue.h"
48 #include "amdgpu_userq_fence.h"
49 
50 #define GFX12_NUM_GFX_RINGS	1
51 #define GFX12_MEC_HPD_SIZE	2048
52 
53 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
54 
55 #define regCP_GFX_MQD_CONTROL_DEFAULT                                             0x00000100
56 #define regCP_GFX_HQD_VMID_DEFAULT                                                0x00000000
57 #define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT                                      0x00000000
58 #define regCP_GFX_HQD_QUANTUM_DEFAULT                                             0x00000a01
59 #define regCP_GFX_HQD_CNTL_DEFAULT                                                0x00f00000
60 #define regCP_RB_DOORBELL_CONTROL_DEFAULT                                         0x00000000
61 #define regCP_GFX_HQD_RPTR_DEFAULT                                                0x00000000
62 
63 #define regCP_HQD_EOP_CONTROL_DEFAULT                                             0x00000006
64 #define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT                                     0x00000000
65 #define regCP_MQD_CONTROL_DEFAULT                                                 0x00000100
66 #define regCP_HQD_PQ_CONTROL_DEFAULT                                              0x00308509
68 #define regCP_HQD_PQ_RPTR_DEFAULT                                                 0x00000000
69 #define regCP_HQD_PERSISTENT_STATE_DEFAULT                                        0x0be05501
70 #define regCP_HQD_IB_CONTROL_DEFAULT                                              0x00300000
71 
72 
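/* firmware binaries consumed by this IP block; MODULE_FIRMWARE() makes
 * them visible to userspace tooling such as initramfs generators
 */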
73 MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
74 MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
75 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
76 MODULE_FIRMWARE("amdgpu/gc_12_0_0_rlc.bin");
77 MODULE_FIRMWARE("amdgpu/gc_12_0_0_toc.bin");
78 MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
79 MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
80 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
81 MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
82 MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
83 MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
84 
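/* GC register list captured when the IP state is dumped for debugging
 * (see gfx_v12_0_alloc_ip_dump() for the matching buffer allocation)
 */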
85 static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
86 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
87 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
88 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
89 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
90 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
91 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
92 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
93 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
94 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
95 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
96 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
97 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
98 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
99 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
100 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
101 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
102 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
103 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
104 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
105 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
106 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
107 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
108 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
109 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
110 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
111 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
112 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
113 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
114 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
115 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
116 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
117 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
118 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
119 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
120 	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
121 	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
122 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
123 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
124 	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
125 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
126 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
127 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_LO32),
128 	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS_HI32),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
139 	/* cp header registers (read 8x; each read is expected to pop the next header dword) */
140 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
143 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
144 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
145 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
146 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
147 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
148 	/* SE status registers */
149 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
150 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
151 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
152 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
153 };
154 
155 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
156 	/* compute registers */
157 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
158 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
159 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
160 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
161 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
162 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
163 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
164 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
165 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
166 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
167 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
168 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
169 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
170 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
171 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
172 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
173 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
174 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
175 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
176 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
177 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
178 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
179 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
180 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
181 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
182 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
183 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
184 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
185 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
186 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
187 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
188 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
189 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
190 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
191 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
192 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
193 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
194 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
195 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
196 	/* cp header registers (read 8x; each read is expected to pop the next header dword) */
197 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
198 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
199 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
200 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
201 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
202 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
203 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
204 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
205 };
206 
207 static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
208 	/* gfx queue registers */
209 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
210 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
211 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
212 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
213 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
214 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
215 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
216 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
217 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
218 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
219 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
220 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
221 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
222 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
223 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
224 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
225 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
226 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
227 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
228 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
229 	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
230 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
231 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
232 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
233 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
234 	/* cp header registers (read 8x per block; each read is expected to pop the next header dword) */
235 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
236 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
237 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
238 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
239 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
240 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
241 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
242 	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
243 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
244 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
245 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
246 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
247 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
248 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
249 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
250 	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
251 };
252 
253 static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
254 	SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
255 	SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
256 	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
257 };
258 
259 static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
260 	SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000),
261 };
262 
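/* default shader memory configuration: 64-bit addressing, unaligned
 * accesses allowed, initial instruction prefetch depth of 3
 */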
263 #define DEFAULT_SH_MEM_CONFIG \
264 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
265 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
266 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
267 
268 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev);
269 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev);
270 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev);
271 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev);
272 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev);
273 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev);
274 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev,
275 				 struct amdgpu_cu_info *cu_info);
276 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev);
277 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
278 				   u32 sh_num, u32 instance, int xcc_id);
279 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
280 
281 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
282 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
283 				     uint32_t val);
284 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
285 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
286 					   uint16_t pasid, uint32_t flush_type,
287 					   bool all_hub, uint8_t dst_sel);
288 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
289 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
290 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev,
291 				      bool enable);
292 
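/* Tell the KIQ which queues it owns via a SET_RESOURCES packet; only
 * the queue mask is used here, the GWS/OAC masks stay zero.
 */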
293 static void gfx_v12_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
294 					uint64_t queue_mask)
295 {
296 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
297 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
298 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
299 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
300 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
301 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
302 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
303 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
304 	amdgpu_ring_write(kiq_ring, 0);
305 }
306 
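/* Emit a MAP_QUEUES packet on the KIQ ring to hand a queue, described
 * by its MQD, over to the CP scheduler. The me/engine_sel encodings
 * below select between compute, gfx and MES queues.
 */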
307 static void gfx_v12_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
308 				     struct amdgpu_ring *ring)
309 {
310 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
311 	uint64_t wptr_addr = ring->wptr_gpu_addr;
312 	uint32_t me = 0, eng_sel = 0;
313 
314 	switch (ring->funcs->type) {
315 	case AMDGPU_RING_TYPE_COMPUTE:
316 		me = 1;
317 		eng_sel = 0;
318 		break;
319 	case AMDGPU_RING_TYPE_GFX:
320 		me = 0;
321 		eng_sel = 4;
322 		break;
323 	case AMDGPU_RING_TYPE_MES:
324 		me = 2;
325 		eng_sel = 5;
326 		break;
327 	default:
328 		WARN_ON(1);
329 	}
330 
331 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
332 	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
333 	amdgpu_ring_write(kiq_ring,
334 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
335 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
336 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
337 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
338 			  PACKET3_MAP_QUEUES_ME((me)) |
339 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
340 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
341 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
342 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
343 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
344 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
345 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
346 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
347 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
348 }
349 
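/* Unmap (or preempt, depending on 'action') a queue through the KIQ.
 * With MES enabled and the KIQ ring not yet ready, the request is
 * routed through MES instead.
 */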
350 static void gfx_v12_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
351 				       struct amdgpu_ring *ring,
352 				       enum amdgpu_unmap_queues_action action,
353 				       u64 gpu_addr, u64 seq)
354 {
355 	struct amdgpu_device *adev = kiq_ring->adev;
356 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
357 
358 	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
359 		amdgpu_mes_unmap_legacy_queue(adev, ring, action,
360 					      gpu_addr, seq, 0);
361 		return;
362 	}
363 
364 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
365 	amdgpu_ring_write(kiq_ring, /* action, queue_sel: 0, eng_sel, num_queues: 1 */
366 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
367 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
368 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
369 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
370 	amdgpu_ring_write(kiq_ring,
371 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
372 
373 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
374 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
375 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
376 		amdgpu_ring_write(kiq_ring, seq);
377 	} else {
378 		amdgpu_ring_write(kiq_ring, 0);
379 		amdgpu_ring_write(kiq_ring, 0);
380 		amdgpu_ring_write(kiq_ring, 0);
381 	}
382 }
383 
384 static void gfx_v12_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
385 				       struct amdgpu_ring *ring,
386 				       u64 addr, u64 seq)
387 {
388 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
389 
390 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
391 	amdgpu_ring_write(kiq_ring,
392 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
393 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
394 			  PACKET3_QUERY_STATUS_COMMAND(2));
395 	amdgpu_ring_write(kiq_ring, /* doorbell offset, eng_sel */
396 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
397 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
398 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
399 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
400 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
401 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
402 }
403 
404 static void gfx_v12_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
405 					  uint16_t pasid,
406 					  uint32_t flush_type,
407 					  bool all_hub)
408 {
409 	gfx_v12_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
410 }
411 
412 static const struct kiq_pm4_funcs gfx_v12_0_kiq_pm4_funcs = {
413 	.kiq_set_resources = gfx_v12_0_kiq_set_resources,
414 	.kiq_map_queues = gfx_v12_0_kiq_map_queues,
415 	.kiq_unmap_queues = gfx_v12_0_kiq_unmap_queues,
416 	.kiq_query_status = gfx_v12_0_kiq_query_status,
417 	.kiq_invalidate_tlbs = gfx_v12_0_kiq_invalidate_tlbs,
418 	.set_resources_size = 8,
419 	.map_queues_size = 7,
420 	.unmap_queues_size = 6,
421 	.query_status_size = 7,
422 	.invalidate_tlbs_size = 2,
423 };
424 
425 static void gfx_v12_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
426 {
427 	adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs;
428 }
429 
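/* Emit a WAIT_REG_MEM packet: stall until the dword at addr0/addr1
 * (when mem_space is set) or the register at addr0, ANDed with 'mask',
 * equals 'ref'; 'inv' is the poll interval.
 */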
430 static void gfx_v12_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
431 				   int mem_space, int opt, uint32_t addr0,
432 				   uint32_t addr1, uint32_t ref,
433 				   uint32_t mask, uint32_t inv)
434 {
435 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
436 	amdgpu_ring_write(ring,
437 			  /* memory (1) or register (0) */
438 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
439 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
440 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
441 			   WAIT_REG_MEM_ENGINE(eng_sel)));
442 
443 	if (mem_space)
444 		BUG_ON(addr0 & 0x3); /* Dword align */
445 	amdgpu_ring_write(ring, addr0);
446 	amdgpu_ring_write(ring, addr1);
447 	amdgpu_ring_write(ring, ref);
448 	amdgpu_ring_write(ring, mask);
449 	amdgpu_ring_write(ring, inv); /* poll interval */
450 }
451 
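/* Basic ring test: seed a scratch register with 0xCAFEDEAD, submit a
 * packet that writes 0xDEADBEEF to it, and poll until the write lands.
 */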
452 static int gfx_v12_0_ring_test_ring(struct amdgpu_ring *ring)
453 {
454 	struct amdgpu_device *adev = ring->adev;
455 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
456 	uint32_t tmp = 0;
457 	unsigned i;
458 	int r;
459 
460 	WREG32(scratch, 0xCAFEDEAD);
461 	r = amdgpu_ring_alloc(ring, 5);
462 	if (r) {
463 		drm_err(adev_to_drm(adev),
464 			"cp failed to lock ring %d (%d).\n",
465 			ring->idx, r);
466 		return r;
467 	}
468 
469 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
470 		gfx_v12_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
471 	} else {
472 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
473 		amdgpu_ring_write(ring, scratch -
474 				  PACKET3_SET_UCONFIG_REG_START);
475 		amdgpu_ring_write(ring, 0xDEADBEEF);
476 	}
477 	amdgpu_ring_commit(ring);
478 
479 	for (i = 0; i < adev->usec_timeout; i++) {
480 		tmp = RREG32(scratch);
481 		if (tmp == 0xDEADBEEF)
482 			break;
483 		if (amdgpu_emu_mode == 1)
484 			msleep(1);
485 		else
486 			udelay(1);
487 	}
488 
489 	if (i >= adev->usec_timeout)
490 		r = -ETIMEDOUT;
491 	return r;
492 }
493 
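/* IB test: submit a small indirect buffer whose WRITE_DATA packet
 * stores 0xDEADBEEF into a writeback slot, then wait on the fence and
 * verify the value reached memory.
 */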
494 static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
495 {
496 	struct amdgpu_device *adev = ring->adev;
497 	struct amdgpu_ib ib;
498 	struct dma_fence *f = NULL;
499 	unsigned index;
500 	uint64_t gpu_addr;
501 	uint32_t *cpu_ptr;
502 	long r;
503 
504 	/* MES KIQ firmware doesn't support indirect buffers for now */
505 	if (adev->enable_mes_kiq &&
506 	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
507 		return 0;
508 
509 	memset(&ib, 0, sizeof(ib));
510 
511 	r = amdgpu_device_wb_get(adev, &index);
512 	if (r)
513 		return r;
514 
515 	gpu_addr = adev->wb.gpu_addr + (index * 4);
516 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
517 	cpu_ptr = &adev->wb.wb[index];
518 
519 	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
520 	if (r) {
521 		drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r);
522 		goto err1;
523 	}
524 
525 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
526 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
527 	ib.ptr[2] = lower_32_bits(gpu_addr);
528 	ib.ptr[3] = upper_32_bits(gpu_addr);
529 	ib.ptr[4] = 0xDEADBEEF;
530 	ib.length_dw = 5;
531 
532 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
533 	if (r)
534 		goto err2;
535 
536 	r = dma_fence_wait_timeout(f, false, timeout);
537 	if (r == 0) {
538 		r = -ETIMEDOUT;
539 		goto err2;
540 	} else if (r < 0) {
541 		goto err2;
542 	}
543 
544 	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
545 		r = 0;
546 	else
547 		r = -EINVAL;
548 err2:
549 	amdgpu_ib_free(&ib, NULL);
550 	dma_fence_put(f);
551 err1:
552 	amdgpu_device_wb_free(adev, index);
553 	return r;
554 }
555 
556 static void gfx_v12_0_free_microcode(struct amdgpu_device *adev)
557 {
558 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
559 	amdgpu_ucode_release(&adev->gfx.me_fw);
560 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
561 	amdgpu_ucode_release(&adev->gfx.mec_fw);
562 
563 	kfree(adev->gfx.rlc.register_list_format);
564 }
565 
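/* Fetch the TOC (table of contents) binary used by RLC backdoor
 * autoload and record its version, size and payload address.
 */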
566 static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
567 {
568 	const struct psp_firmware_header_v1_0 *toc_hdr;
569 	int err = 0;
570 
571 	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
572 				   AMDGPU_UCODE_REQUIRED,
573 				   "amdgpu/%s_toc.bin", ucode_prefix);
574 	if (err)
575 		goto out;
576 
577 	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
578 	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
579 	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
580 	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
581 	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
582 			le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
583 	return 0;
584 out:
585 	amdgpu_ucode_release(&adev->psp.toc_fw);
586 	return err;
587 }
588 
589 static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
590 {
591 	char ucode_prefix[30];
592 	int err;
593 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
594 	uint16_t version_major;
595 	uint16_t version_minor;
596 
597 	DRM_DEBUG("\n");
598 
599 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
600 
601 	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
602 				   AMDGPU_UCODE_REQUIRED,
603 				   "amdgpu/%s_pfp.bin", ucode_prefix);
604 	if (err)
605 		goto out;
606 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
607 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
608 
609 	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
610 				   AMDGPU_UCODE_REQUIRED,
611 				   "amdgpu/%s_me.bin", ucode_prefix);
612 	if (err)
613 		goto out;
614 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
615 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
616 
617 	if (!amdgpu_sriov_vf(adev)) {
618 		if (amdgpu_is_kicker_fw(adev))
619 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
620 						   AMDGPU_UCODE_REQUIRED,
621 						   "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
622 		else
623 			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
624 						   AMDGPU_UCODE_REQUIRED,
625 						   "amdgpu/%s_rlc.bin", ucode_prefix);
626 		if (err)
627 			goto out;
628 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
629 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
630 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
631 		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
632 		if (err)
633 			goto out;
634 	}
635 
636 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
637 				   AMDGPU_UCODE_REQUIRED,
638 				   "amdgpu/%s_mec.bin", ucode_prefix);
639 	if (err)
640 		goto out;
641 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
642 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
643 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
644 
645 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
646 		err = gfx_v12_0_init_toc_microcode(adev, ucode_prefix);
647 
648 	/* only one MEC for gfx 12 */
649 	adev->gfx.mec2_fw = NULL;
650 
651 	if (adev->gfx.imu.funcs) {
652 		if (adev->gfx.imu.funcs->init_microcode) {
653 			err = adev->gfx.imu.funcs->init_microcode(adev);
654 			if (err)
655 				dev_err(adev->dev, "Failed to load imu firmware!\n");
656 		}
657 	}
658 
659 out:
660 	if (err) {
661 		amdgpu_ucode_release(&adev->gfx.pfp_fw);
662 		amdgpu_ucode_release(&adev->gfx.me_fw);
663 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
664 		amdgpu_ucode_release(&adev->gfx.mec_fw);
665 	}
666 
667 	return err;
668 }
669 
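/*
 * Clear state buffer (CSB) dword layout, as produced by
 * gfx_v12_0_get_csb_buffer() below (a sketch derived from the code):
 *
 *   buffer[0]           cluster (extent) count
 *   per extent:
 *     buffer[n]         ext->reg_count
 *     buffer[n + 1]     ext->reg_index
 *     buffer[n + 2..]   ext->extent[0..reg_count - 1]
 *
 * gfx_v12_0_get_csb_size() walks the same cs_data to size this buffer.
 */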
670 static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
671 {
672 	u32 count = 0;
673 	const struct cs_section_def *sect = NULL;
674 	const struct cs_extent_def *ext = NULL;
675 
676 	count += 1;
677 
678 	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
679 		if (sect->id == SECT_CONTEXT) {
680 			for (ext = sect->section; ext->extent != NULL; ++ext)
681 				count += 2 + ext->reg_count;
682 		} else
683 			return 0;
684 	}
685 
686 	return count;
687 }
688 
689 static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
690 {
691 	u32 count = 0, clustercount = 0, i;
692 	const struct cs_section_def *sect = NULL;
693 	const struct cs_extent_def *ext = NULL;
694 
695 	if (adev->gfx.rlc.cs_data == NULL)
696 		return;
697 	if (buffer == NULL)
698 		return;
699 
700 	count += 1;
701 
702 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
703 		if (sect->id == SECT_CONTEXT) {
704 			for (ext = sect->section; ext->extent != NULL; ++ext) {
705 				clustercount++;
706 				buffer[count++] = ext->reg_count;
707 				buffer[count++] = ext->reg_index;
708 
709 				for (i = 0; i < ext->reg_count; i++)
710 					buffer[count++] = cpu_to_le32(ext->extent[i]);
711 			}
712 		} else
713 			return;
714 	}
715 
716 	buffer[0] = clustercount;
717 }
718 
719 static void gfx_v12_0_rlc_fini(struct amdgpu_device *adev)
720 {
721 	/* clear state block */
722 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
723 			&adev->gfx.rlc.clear_state_gpu_addr,
724 			(void **)&adev->gfx.rlc.cs_ptr);
725 
726 	/* jump table block */
727 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
728 			&adev->gfx.rlc.cp_table_gpu_addr,
729 			(void **)&adev->gfx.rlc.cp_table_ptr);
730 }
731 
732 static void gfx_v12_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
733 {
734 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
735 
736 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
737 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
738 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
739 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
740 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
741 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
742 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
743 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
744 	adev->gfx.rlc.rlcg_reg_access_supported = true;
745 }
746 
747 static int gfx_v12_0_rlc_init(struct amdgpu_device *adev)
748 {
749 	const struct cs_section_def *cs_data;
750 	int r;
751 
752 	adev->gfx.rlc.cs_data = gfx12_cs_data;
753 
754 	cs_data = adev->gfx.rlc.cs_data;
755 
756 	if (cs_data) {
757 		/* init clear state block */
758 		r = amdgpu_gfx_rlc_init_csb(adev);
759 		if (r)
760 			return r;
761 	}
762 
763 	/* init spm vmid with 0xf */
764 	if (adev->gfx.rlc.funcs->update_spm_vmid)
765 		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf);
766 
767 	return 0;
768 }
769 
770 static void gfx_v12_0_mec_fini(struct amdgpu_device *adev)
771 {
772 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
773 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
774 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
775 }
776 
777 static void gfx_v12_0_me_init(struct amdgpu_device *adev)
778 {
779 	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
780 
781 	amdgpu_gfx_graphics_queue_acquire(adev);
782 }
783 
784 static int gfx_v12_0_mec_init(struct amdgpu_device *adev)
785 {
786 	int r;
787 	u32 *hpd;
788 	size_t mec_hpd_size;
789 
790 	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
791 
792 	/* take ownership of the relevant compute queues */
793 	amdgpu_gfx_compute_queue_acquire(adev);
794 	mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE;
795 
796 	if (mec_hpd_size) {
797 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
798 					      AMDGPU_GEM_DOMAIN_GTT,
799 					      &adev->gfx.mec.hpd_eop_obj,
800 					      &adev->gfx.mec.hpd_eop_gpu_addr,
801 					      (void **)&hpd);
802 		if (r) {
803 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
804 			gfx_v12_0_mec_fini(adev);
805 			return r;
806 		}
807 
808 		memset(hpd, 0, mec_hpd_size);
809 
810 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
811 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
812 	}
813 
814 	return 0;
815 }
816 
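/* SQ indirect register access: select the wave (and optionally thread
 * and starting index) through SQ_IND_INDEX, then read the data back
 * from SQ_IND_DATA; AUTO_INCR advances the index on each read.
 */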
817 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
818 {
819 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
820 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
821 		(address << SQ_IND_INDEX__INDEX__SHIFT));
822 	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
823 }
824 
825 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
826 			   uint32_t thread, uint32_t regno,
827 			   uint32_t num, uint32_t *out)
828 {
829 	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
830 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
831 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
832 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
833 		(SQ_IND_INDEX__AUTO_INCR_MASK));
834 	while (num--)
835 		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
836 }
837 
838 static void gfx_v12_0_read_wave_data(struct amdgpu_device *adev,
839 				     uint32_t xcc_id,
840 				     uint32_t simd, uint32_t wave,
841 				     uint32_t *dst, int *no_fields)
842 {
843 	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
844 	 * field when performing a select_se_sh so it should be
845 	 * zero here
	 */
846 	WARN_ON(simd != 0);
847 
848 	/* type 4 wave data */
849 	dst[(*no_fields)++] = 4;
850 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
851 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
852 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
853 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
854 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
855 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
856 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
857 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
858 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
859 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
860 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
861 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
862 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
863 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
864 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATE_PRIV);
865 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_PRIV);
866 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXCP_FLAG_USER);
867 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAP_CTRL);
868 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_ACTIVE);
869 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_VALID_AND_IDLE);
870 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_LO);
871 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_DVGPR_ALLOC_HI);
872 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_SCHED_MODE);
873 }
874 
875 static void gfx_v12_0_read_wave_sgprs(struct amdgpu_device *adev,
876 				      uint32_t xcc_id, uint32_t simd,
877 				      uint32_t wave, uint32_t start,
878 				      uint32_t size, uint32_t *dst)
879 {
880 	WARN_ON(simd != 0);
881 
882 	wave_read_regs(
883 		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
884 		dst);
885 }
886 
887 static void gfx_v12_0_read_wave_vgprs(struct amdgpu_device *adev,
888 				      uint32_t xcc_id, uint32_t simd,
889 				      uint32_t wave, uint32_t thread,
890 				      uint32_t start, uint32_t size,
891 				      uint32_t *dst)
892 {
893 	wave_read_regs(
894 		adev, wave, thread,
895 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
896 }
897 
898 static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
899 				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
900 {
901 	soc24_grbm_select(adev, me, pipe, q, vm);
902 }
903 
904 /* all sizes are in bytes */
905 #define MQD_SHADOW_BASE_SIZE      73728
906 #define MQD_SHADOW_BASE_ALIGNMENT 256
907 #define MQD_FWWORKAREA_SIZE       484
908 #define MQD_FWWORKAREA_ALIGNMENT  256
909 
910 static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
911 						  struct amdgpu_gfx_shadow_info *shadow_info)
912 {
913 	/* for gfx */
914 	shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
915 	shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
916 	shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
917 	shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
918 	/* for compute */
919 	shadow_info->eop_size = GFX12_MEC_HPD_SIZE;
920 	shadow_info->eop_alignment = 256;
921 }
922 
923 static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
924 					 struct amdgpu_gfx_shadow_info *shadow_info,
925 					 bool skip_check)
926 {
927 	if (adev->gfx.cp_gfx_shadow || skip_check) {
928 		gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
929 		return 0;
930 	}
931 
932 	memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
933 	return -EINVAL;
934 }
935 
936 static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
937 	.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
938 	.select_se_sh = &gfx_v12_0_select_se_sh,
939 	.read_wave_data = &gfx_v12_0_read_wave_data,
940 	.read_wave_sgprs = &gfx_v12_0_read_wave_sgprs,
941 	.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
942 	.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
943 	.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
944 	.get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
945 	.get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
946 };
947 
948 static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
949 {
951 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
952 	case IP_VERSION(12, 0, 0):
953 	case IP_VERSION(12, 0, 1):
954 		adev->gfx.config.max_hw_contexts = 8;
955 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
956 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
957 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
958 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
959 		break;
960 	default:
961 		BUG();
962 		break;
963 	}
964 
965 	return 0;
966 }
967 
968 static int gfx_v12_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
969 				   int me, int pipe, int queue)
970 {
971 	int r;
972 	struct amdgpu_ring *ring;
973 	unsigned int irq_type;
974 
975 	ring = &adev->gfx.gfx_ring[ring_id];
976 
977 	ring->me = me;
978 	ring->pipe = pipe;
979 	ring->queue = queue;
980 
981 	ring->ring_obj = NULL;
982 	ring->use_doorbell = true;
983 
984 	if (!ring_id)
985 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
986 	else
987 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
988 	ring->vm_hub = AMDGPU_GFXHUB(0);
989 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
990 
991 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
992 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
993 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
994 	if (r)
995 		return r;
996 	return 0;
997 }
998 
999 static int gfx_v12_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1000 				       int mec, int pipe, int queue)
1001 {
1002 	int r;
1003 	unsigned irq_type;
1004 	struct amdgpu_ring *ring;
1005 	unsigned int hw_prio;
1006 
1007 	ring = &adev->gfx.compute_ring[ring_id];
1008 
1009 	/* mec0 is me1 */
1010 	ring->me = mec + 1;
1011 	ring->pipe = pipe;
1012 	ring->queue = queue;
1013 
1014 	ring->ring_obj = NULL;
1015 	ring->use_doorbell = true;
1016 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
1017 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1018 				+ (ring_id * GFX12_MEC_HPD_SIZE);
1019 	ring->vm_hub = AMDGPU_GFXHUB(0);
1020 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1021 
1022 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1023 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1024 		+ ring->pipe;
1025 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1026 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
1027 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1028 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1029 			     hw_prio, NULL);
1030 	if (r)
1031 		return r;
1032 
1033 	return 0;
1034 }
1035 
1036 static struct {
1037 	SOC24_FIRMWARE_ID	id;
1038 	unsigned int		offset;
1039 	unsigned int		size;
1040 	unsigned int		size_x16;
1041 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];
1042 
1043 #define RLC_TOC_OFFSET_DWUNIT   8
1044 #define RLC_SIZE_MULTIPLE       1024
1045 #define RLC_TOC_UMF_SIZE_inM	23ULL
1046 #define RLC_TOC_FORMAT_API	165ULL
1047 
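/* Walk the RLC TOC and cache each firmware's offset and size. Offsets
 * are stored in 8-dword units and sizes in dwords, scaled by 1024 when
 * size_x16 is set, hence the conversions to bytes below.
 */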
1048 static void gfx_v12_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
1049 {
1050 	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;
1051 
1052 	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
1053 		rlc_autoload_info[ucode->id].id = ucode->id;
1054 		rlc_autoload_info[ucode->id].offset =
1055 			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
1056 		rlc_autoload_info[ucode->id].size =
1057 			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
1058 					  ucode->size * 4;
1059 		ucode++;
1060 	}
1061 }
1062 
1063 static uint32_t gfx_v12_0_calc_toc_total_size(struct amdgpu_device *adev)
1064 {
1065 	uint32_t total_size = 0;
1066 	SOC24_FIRMWARE_ID id;
1067 
1068 	gfx_v12_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);
1069 
1070 	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
1071 		total_size += rlc_autoload_info[id].size;
1072 
1073 	/* offsets in the TOC may be aligned up, so the summed sizes can undershoot the last entry */
1074 	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
1075 		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
1076 			rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
1077 	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
1078 		total_size = RLC_TOC_UMF_SIZE_inM << 20;
1079 
1080 	return total_size;
1081 }
1082 
1083 static int gfx_v12_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
1084 {
1085 	int r;
1086 	uint32_t total_size;
1087 
1088 	total_size = gfx_v12_0_calc_toc_total_size(adev);
1089 
1090 	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
1091 				      AMDGPU_GEM_DOMAIN_VRAM,
1092 				      &adev->gfx.rlc.rlc_autoload_bo,
1093 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
1094 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1095 
1096 	if (r) {
1097 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1098 		return r;
1099 	}
1100 
1101 	return 0;
1102 }
1103 
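/* Copy one firmware image into its slot in the autoload buffer,
 * clamping to the slot size recorded in the TOC and zero-filling
 * any remaining tail.
 */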
1104 static void gfx_v12_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1105 						       SOC24_FIRMWARE_ID id,
1106 						       const void *fw_data,
1107 						       uint32_t fw_size)
1108 {
1109 	uint32_t toc_offset;
1110 	uint32_t toc_fw_size;
1111 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1112 
1113 	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
1114 		return;
1115 
1116 	toc_offset = rlc_autoload_info[id].offset;
1117 	toc_fw_size = rlc_autoload_info[id].size;
1118 
1119 	if (fw_size == 0)
1120 		fw_size = toc_fw_size;
1121 
1122 	if (fw_size > toc_fw_size)
1123 		fw_size = toc_fw_size;
1124 
1125 	memcpy(ptr + toc_offset, fw_data, fw_size);
1126 
1127 	if (fw_size < toc_fw_size)
1128 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1129 }
1130 
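/* Stamp the TOC format/API dword (second to last dword of the image)
 * and stage the TOC itself into its autoload slot.
 */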
1131 static void
1132 gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
1133 {
1134 	void *data;
1135 	uint32_t size;
1136 	uint32_t *toc_ptr;
1137 
1138 	data = adev->psp.toc.start_addr;
1139 	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;
1140 
1141 	toc_ptr = (uint32_t *)data + size / 4 - 2;
1142 	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;
1143 
1144 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
1145 						   data, size);
1146 }
1147 
1148 static void
1149 gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
1150 {
1151 	const __le32 *fw_data;
1152 	uint32_t fw_size;
1153 	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1154 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1155 	const struct rlc_firmware_header_v2_1 *rlcv21_hdr;
1156 	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1157 	uint16_t version_major, version_minor;
1158 
1159 	/* pfp ucode */
1160 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1161 		adev->gfx.pfp_fw->data;
1162 	/* instruction */
1163 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1164 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1165 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1166 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP,
1167 						   fw_data, fw_size);
1168 	/* data */
1169 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1170 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
1171 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1172 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK,
1173 						   fw_data, fw_size);
1174 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK,
1175 						   fw_data, fw_size);
1176 	/* me ucode */
1177 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1178 		adev->gfx.me_fw->data;
1179 	/* instruction */
1180 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1181 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1182 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1183 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME,
1184 						   fw_data, fw_size);
1185 	/* data */
1186 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1187 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
1188 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1189 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P0_STACK,
1190 						   fw_data, fw_size);
1191 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P1_STACK,
1192 						   fw_data, fw_size);
1193 	/* mec ucode */
1194 	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1195 		adev->gfx.mec_fw->data;
1196 	/* instruction */
1197 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1198 		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1199 	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1200 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
1201 						   fw_data, fw_size);
1202 	/* data */
1203 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1204 		le32_to_cpu(cpv2_hdr->data_offset_bytes));
1205 	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1206 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
1207 						   fw_data, fw_size);
1208 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
1209 						   fw_data, fw_size);
1210 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
1211 						   fw_data, fw_size);
1212 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
1213 						   fw_data, fw_size);
1214 
1215 	/* rlc ucode */
1216 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1217 		adev->gfx.rlc_fw->data;
1218 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1219 			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1220 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1221 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
1222 						   fw_data, fw_size);
1223 
1224 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1225 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1226 	if (version_major == 2) {
1227 		if (version_minor >= 1) {
1228 			rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1229 
1230 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1231 					le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes));
1232 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes);
1233 			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH,
1234 						   fw_data, fw_size);
1235 
1236 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1237 					le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes));
1238 			fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes);
1239 			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM,
1240 						   fw_data, fw_size);
1241 		}
1242 		if (version_minor >= 2) {
1243 			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1244 
1245 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1246 					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1247 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1248 			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
1249 						   fw_data, fw_size);
1250 
1251 			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1252 					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1253 			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1254 			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
1255 						   fw_data, fw_size);
1256 		}
1257 	}
1258 }
1259 
1260 static void
1261 gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
1262 {
1263 	const __le32 *fw_data;
1264 	uint32_t fw_size;
1265 	const struct sdma_firmware_header_v3_0 *sdma_hdr;
1266 
1267 	sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
1268 		adev->sdma.instance[0].fw->data;
1269 	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1270 			le32_to_cpu(sdma_hdr->ucode_offset_bytes));
1271 	fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);
1272 
1273 	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
1274 						   fw_data, fw_size);
1275 }
1276 
1277 static void
1278 gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
1279 {
1280 	const __le32 *fw_data;
1281 	unsigned fw_size;
1282 	const struct mes_firmware_header_v1_0 *mes_hdr;
1283 	int pipe, ucode_id, data_id;
1284 
1285 	for (pipe = 0; pipe < 2; pipe++) {
1286 		if (pipe == 0) {
1287 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
1288 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
1289 		} else {
1290 			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
1291 			data_id  = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
1292 		}
1293 
1294 		mes_hdr = (const struct mes_firmware_header_v1_0 *)
1295 			adev->mes.fw[pipe]->data;
1296 
1297 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1298 				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1299 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1300 
1301 		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);
1302 
1303 		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1304 				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1305 		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1306 
1307 		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
1308 	}
1309 }
1310 
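/* RLC backdoor autoload: stage every firmware in the autoload buffer,
 * point the IMU bootloader registers at the RLC_G image, then boot
 * either through the IMU (if present and dpm is enabled) or by
 * unhalting the RLC threads directly.
 */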
1311 static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1312 {
1313 	uint32_t rlc_g_offset, rlc_g_size;
1314 	uint64_t gpu_addr;
1315 	uint32_t data;
1316 
1317 	/* RLC autoload sequence 2: copy ucode */
1318 	gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
1319 	gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
1320 	gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(adev);
1321 	gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
1322 
1323 	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
1324 	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
1325 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;
1326 
1327 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1328 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1329 
1330 	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1331 
1332 	if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
1333 		/* RLC autoload sequence 3: load IMU fw */
1334 		if (adev->gfx.imu.funcs->load_microcode)
1335 			adev->gfx.imu.funcs->load_microcode(adev);
1336 		/* RLC autoload sequence 4: init IMU fw */
1337 		if (adev->gfx.imu.funcs->setup_imu)
1338 			adev->gfx.imu.funcs->setup_imu(adev);
1339 		if (adev->gfx.imu.funcs->start_imu)
1340 			adev->gfx.imu.funcs->start_imu(adev);
1341 
1342 		/* RLC autoload sequence 5: disable gpa mode */
1343 		gfx_v12_0_disable_gpa_mode(adev);
1344 	} else {
1345 		/* unhalt rlc to start autoload without imu */
1346 		data = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
1347 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
1348 		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1349 		WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, data);
1350 		WREG32_SOC15(GC, 0, regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
1351 	}
1352 
1353 	return 0;
1354 }
1355 
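/* Allocate the buffers used to snapshot core, compute-queue and
 * gfx-queue registers for the IP dump; allocation failure simply
 * disables the corresponding dump.
 */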
1356 static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
1357 {
1358 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
1359 	uint32_t *ptr;
1360 	uint32_t inst;
1361 
1362 	ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
1363 	if (!ptr) {
1364 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1365 		adev->gfx.ip_dump_core = NULL;
1366 	} else {
1367 		adev->gfx.ip_dump_core = ptr;
1368 	}
1369 
1370 	/* Allocate memory for compute queue registers for all the instances */
1371 	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
1372 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1373 		adev->gfx.mec.num_queue_per_pipe;
1374 
1375 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1376 	if (!ptr) {
1377 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1378 		adev->gfx.ip_dump_compute_queues = NULL;
1379 	} else {
1380 		adev->gfx.ip_dump_compute_queues = ptr;
1381 	}
1382 
1383 	/* Allocate memory for gfx queue registers for all the instances */
1384 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
1385 	inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
1386 		adev->gfx.me.num_queue_per_pipe;
1387 
1388 	ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1389 	if (!ptr) {
1390 		DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
1391 		adev->gfx.ip_dump_gfx_queues = NULL;
1392 	} else {
1393 		adev->gfx.ip_dump_gfx_queues = ptr;
1394 	}
1395 }
1396 
1397 static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
1398 {
1399 	int i, j, k, r, ring_id = 0;
1400 	unsigned num_compute_rings;
1401 	int xcc_id = 0;
1402 	struct amdgpu_device *adev = ip_block->adev;
1403 	int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
1404 
1405 	INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
1406 
1407 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1408 	case IP_VERSION(12, 0, 0):
1409 	case IP_VERSION(12, 0, 1):
1410 		adev->gfx.me.num_me = 1;
1411 		adev->gfx.me.num_pipe_per_me = 1;
1412 		adev->gfx.me.num_queue_per_pipe = 8;
1413 		adev->gfx.mec.num_mec = 1;
1414 		adev->gfx.mec.num_pipe_per_mec = 2;
1415 		adev->gfx.mec.num_queue_per_pipe = 4;
1416 		break;
1417 	default:
1418 		adev->gfx.me.num_me = 1;
1419 		adev->gfx.me.num_pipe_per_me = 1;
1420 		adev->gfx.me.num_queue_per_pipe = 1;
1421 		adev->gfx.mec.num_mec = 1;
1422 		adev->gfx.mec.num_pipe_per_mec = 4;
1423 		adev->gfx.mec.num_queue_per_pipe = 8;
1424 		break;
1425 	}
1426 
1427 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1428 	case IP_VERSION(12, 0, 0):
1429 	case IP_VERSION(12, 0, 1):
1430 		if (!adev->gfx.disable_uq &&
1431 		    adev->gfx.me_fw_version  >= 2780 &&
1432 		    adev->gfx.pfp_fw_version >= 2840 &&
1433 		    adev->gfx.mec_fw_version >= 3050 &&
1434 		    adev->mes.fw_version[0] >= 123) {
1435 			adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
1436 			adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
1437 		}
1438 		break;
1439 	default:
1440 		break;
1441 	}
1442 
1443 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1444 	case IP_VERSION(12, 0, 0):
1445 	case IP_VERSION(12, 0, 1):
1446 		if (adev->gfx.me_fw_version  >= 2480 &&
1447 		    adev->gfx.pfp_fw_version >= 2530 &&
1448 		    adev->gfx.mec_fw_version >= 2680 &&
1449 		    adev->mes.fw_version[0] >= 100)
1450 			adev->gfx.enable_cleaner_shader = true;
1451 		break;
1452 	default:
1453 		adev->gfx.enable_cleaner_shader = false;
1454 		break;
1455 	}
1456 
1457 	if (adev->gfx.num_compute_rings) {
1458 		/* recalculate compute rings to use based on hardware configuration */
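		/* cap the rings at half of the available MEC queues; the
		 * remainder is presumably left for other clients such as
		 * amdkfd
		 */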
1459 		num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
1460 				     adev->gfx.mec.num_queue_per_pipe) / 2;
1461 		adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
1462 						  num_compute_rings);
1463 	}
1464 
1465 	/* EOP Event */
1466 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1467 			      GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
1468 			      &adev->gfx.eop_irq);
1469 	if (r)
1470 		return r;
1471 
1472 	/* Bad opcode Event */
1473 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1474 			      GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR,
1475 			      &adev->gfx.bad_op_irq);
1476 	if (r)
1477 		return r;
1478 
1479 	/* Privileged reg */
1480 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1481 			      GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT,
1482 			      &adev->gfx.priv_reg_irq);
1483 	if (r)
1484 		return r;
1485 
1486 	/* Privileged inst */
1487 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1488 			      GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1489 			      &adev->gfx.priv_inst_irq);
1490 	if (r)
1491 		return r;
1492 
1493 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1494 
1495 	gfx_v12_0_me_init(adev);
1496 
1497 	r = gfx_v12_0_rlc_init(adev);
1498 	if (r) {
1499 		dev_err(adev->dev, "Failed to init rlc BOs!\n");
1500 		return r;
1501 	}
1502 
1503 	r = gfx_v12_0_mec_init(adev);
1504 	if (r) {
1505 		dev_err(adev->dev, "Failed to init MEC BOs!\n");
1506 		return r;
1507 	}
1508 
1509 	if (adev->gfx.num_gfx_rings) {
1510 		/* set up the gfx ring */
1511 		for (i = 0; i < adev->gfx.me.num_me; i++) {
1512 			for (j = 0; j < num_queue_per_pipe; j++) {
1513 				for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1514 					if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1515 						continue;
1516 
1517 					r = gfx_v12_0_gfx_ring_init(adev, ring_id,
1518 								    i, k, j);
1519 					if (r)
1520 						return r;
1521 					ring_id++;
1522 				}
1523 			}
1524 		}
1525 	}
1526 
1527 	if (adev->gfx.num_compute_rings) {
1528 		ring_id = 0;
1529 		/* set up the compute queues - allocate horizontally across pipes */
1530 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1531 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1532 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1533 					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
1534 									     0, i, k, j))
1535 						continue;
1536 
1537 					r = gfx_v12_0_compute_ring_init(adev, ring_id,
1538 									i, k, j);
1539 					if (r)
1540 						return r;
1541 
1542 					ring_id++;
1543 				}
1544 			}
1545 		}
1546 	}
1547 
1548 	adev->gfx.gfx_supported_reset =
1549 		amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
1550 	adev->gfx.compute_supported_reset =
1551 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1552 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1553 	case IP_VERSION(12, 0, 0):
1554 	case IP_VERSION(12, 0, 1):
1555 		if ((adev->gfx.me_fw_version >= 2660) &&
1556 		    (adev->gfx.mec_fw_version >= 2920) &&
1557 		    !amdgpu_sriov_vf(adev) &&
1558 		    !adev->debug_disable_gpu_ring_reset) {
1559 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1560 			adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1561 		}
1562 		break;
1563 	default:
1564 		break;
1565 	}
1566 
1567 	if (!adev->enable_mes_kiq) {
1568 		r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
1569 		if (r) {
1570 			dev_err(adev->dev, "Failed to init KIQ BOs!\n");
1571 			return r;
1572 		}
1573 
1574 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1575 		if (r)
1576 			return r;
1577 	}
1578 
1579 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_compute_mqd), 0);
1580 	if (r)
1581 		return r;
1582 
1583 	/* allocate visible FB for rlc auto-loading fw */
1584 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1585 		r = gfx_v12_0_rlc_autoload_buffer_init(adev);
1586 		if (r)
1587 			return r;
1588 	}
1589 
1590 	r = gfx_v12_0_gpu_early_init(adev);
1591 	if (r)
1592 		return r;
1593 
1594 	gfx_v12_0_alloc_ip_dump(adev);
1595 
1596 	r = amdgpu_gfx_sysfs_init(adev);
1597 	if (r)
1598 		return r;
1599 
1600 	return 0;
1601 }
1602 
1603 static void gfx_v12_0_pfp_fini(struct amdgpu_device *adev)
1604 {
1605 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1606 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1607 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1608 
1609 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1610 			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1611 			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1612 }
1613 
1614 static void gfx_v12_0_me_fini(struct amdgpu_device *adev)
1615 {
1616 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1617 			      &adev->gfx.me.me_fw_gpu_addr,
1618 			      (void **)&adev->gfx.me.me_fw_ptr);
1619 
1620 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1621 			       &adev->gfx.me.me_fw_data_gpu_addr,
1622 			       (void **)&adev->gfx.me.me_fw_data_ptr);
1623 }
1624 
1625 static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1626 {
1627 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1628 			&adev->gfx.rlc.rlc_autoload_gpu_addr,
1629 			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
1630 }
1631 
1632 static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
1633 {
1634 	int i;
1635 	struct amdgpu_device *adev = ip_block->adev;
1636 
1637 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1638 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1639 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1640 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1641 
1642 	amdgpu_gfx_mqd_sw_fini(adev, 0);
1643 
1644 	if (!adev->enable_mes_kiq) {
1645 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
1646 		amdgpu_gfx_kiq_fini(adev, 0);
1647 	}
1648 
1649 	gfx_v12_0_pfp_fini(adev);
1650 	gfx_v12_0_me_fini(adev);
1651 	gfx_v12_0_rlc_fini(adev);
1652 	gfx_v12_0_mec_fini(adev);
1653 
1654 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1655 		gfx_v12_0_rlc_autoload_buffer_fini(adev);
1656 
1657 	gfx_v12_0_free_microcode(adev);
1658 
1659 	amdgpu_gfx_sysfs_fini(adev);
1660 
1661 	kfree(adev->gfx.ip_dump_core);
1662 	kfree(adev->gfx.ip_dump_compute_queues);
1663 	kfree(adev->gfx.ip_dump_gfx_queues);
1664 
1665 	return 0;
1666 }
1667 
1668 static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1669 				   u32 sh_num, u32 instance, int xcc_id)
1670 {
1671 	u32 data;
1672 
1673 	if (instance == 0xffffffff)
1674 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1675 				     INSTANCE_BROADCAST_WRITES, 1);
1676 	else
1677 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1678 				     instance);
1679 
1680 	if (se_num == 0xffffffff)
1681 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1682 				     1);
1683 	else
1684 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1685 
1686 	if (sh_num == 0xffffffff)
1687 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1688 				     1);
1689 	else
1690 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1691 
1692 	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1693 }
1694 
1695 static u32 gfx_v12_0_get_sa_active_bitmap(struct amdgpu_device *adev)
1696 {
1697 	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1698 
1699 	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_CC_GC_SA_UNIT_DISABLE);
1700 	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1701 					    GRBM_CC_GC_SA_UNIT_DISABLE,
1702 					    SA_DISABLE);
1703 	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGRBM_GC_USER_SA_UNIT_DISABLE);
1704 	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1705 						 GRBM_GC_USER_SA_UNIT_DISABLE,
1706 						 SA_DISABLE);
1707 	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1708 					    adev->gfx.config.max_shader_engines);
1709 
1710 	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1711 }
1712 
1713 static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1714 {
1715 	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1716 	u32 rb_mask;
1717 
1718 	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1719 	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1720 					    CC_RB_BACKEND_DISABLE,
1721 					    BACKEND_DISABLE);
1722 	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1723 	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1724 						 GC_USER_RB_BACKEND_DISABLE,
1725 						 BACKEND_DISABLE);
1726 	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1727 					    adev->gfx.config.max_shader_engines);
1728 
1729 	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1730 }
1731 
1732 static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
1733 {
1734 	u32 rb_bitmap_per_sa;
1735 	u32 rb_bitmap_width_per_sa;
1736 	u32 max_sa;
1737 	u32 active_sa_bitmap;
1738 	u32 global_active_rb_bitmap;
1739 	u32 active_rb_bitmap = 0;
1740 	u32 i;
1741 
1742 	/* query sa bitmap from SA_UNIT_DISABLE registers */
1743 	active_sa_bitmap = gfx_v12_0_get_sa_active_bitmap(adev);
1744 	/* query rb bitmap from RB_BACKEND_DISABLE registers */
1745 	global_active_rb_bitmap = gfx_v12_0_get_rb_active_bitmap(adev);
1746 
1747 	/* generate active rb bitmap according to active sa bitmap */
1748 	max_sa = adev->gfx.config.max_shader_engines *
1749 		 adev->gfx.config.max_sh_per_se;
1750 	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1751 				 adev->gfx.config.max_sh_per_se;
1752 	rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
1753 
1754 	for (i = 0; i < max_sa; i++) {
1755 		if (active_sa_bitmap & (1 << i))
1756 			active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
1757 	}
1758 
1759 	active_rb_bitmap &= global_active_rb_bitmap;
1760 	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1761 	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1762 }
1763 
1764 #define LDS_APP_BASE           0x1
1765 #define SCRATCH_APP_BASE       0x2
1766 
1767 static void gfx_v12_0_init_compute_vmid(struct amdgpu_device *adev)
1768 {
1769 	int i;
1770 	uint32_t sh_mem_bases;
1771 	uint32_t data;
1772 
1773 	/*
1774 	 * Configure apertures:
1775 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1776 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1777 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (256TB)
1778 	 */
1779 	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1780 			SCRATCH_APP_BASE;
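	/* SH_MEM_BASES packs the 16-bit shared (LDS) and private (scratch)
	 * aperture selectors; values 0x1 and 0x2 select the windows listed
	 * above
	 */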
1781 
1782 	mutex_lock(&adev->srbm_mutex);
1783 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1784 		soc24_grbm_select(adev, 0, 0, 0, i);
1785 		/* CP and shaders */
1786 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1787 		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
1788 
1789 		/* Enable trap for each kfd vmid. */
1790 		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
1791 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1792 		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
1793 	}
1794 	soc24_grbm_select(adev, 0, 0, 0, 0);
1795 	mutex_unlock(&adev->srbm_mutex);
1796 }
1797 
1798 static void gfx_v12_0_tcp_harvest(struct amdgpu_device *adev)
1799 {
1800 	/* TODO: harvest feature to be added later. */
1801 }
1802 
1803 static void gfx_v12_0_get_tcc_info(struct amdgpu_device *adev)
1804 {
1805 }
1806 
1807 static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
1808 {
1809 	u32 tmp;
1810 	int i;
1811 
1812 	if (!amdgpu_sriov_vf(adev))
1813 		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1814 
1815 	gfx_v12_0_setup_rb(adev);
1816 	gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info);
1817 	gfx_v12_0_get_tcc_info(adev);
1818 	adev->gfx.config.pa_sc_tile_steering_override = 0;
1819 
1820 	/* XXX SH_MEM regs */
1821 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1822 	mutex_lock(&adev->srbm_mutex);
1823 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1824 		soc24_grbm_select(adev, 0, 0, 0, i);
1825 		/* CP and shaders */
1826 		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1827 		if (i != 0) {
1828 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1829 				(adev->gmc.private_aperture_start >> 48));
1830 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1831 				(adev->gmc.shared_aperture_start >> 48));
1832 			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
1833 		}
1834 	}
1835 	soc24_grbm_select(adev, 0, 0, 0, 0);
1836 
1837 	mutex_unlock(&adev->srbm_mutex);
1838 
1839 	gfx_v12_0_init_compute_vmid(adev);
1840 }
1841 
1842 static u32 gfx_v12_0_get_cpg_int_cntl(struct amdgpu_device *adev,
1843 				      int me, int pipe)
1844 {
1845 	if (me != 0)
1846 		return 0;
1847 
1848 	switch (pipe) {
1849 	case 0:
1850 		return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
1851 	default:
1852 		return 0;
1853 	}
1854 }
1855 
1856 static u32 gfx_v12_0_get_cpc_int_cntl(struct amdgpu_device *adev,
1857 				      int me, int pipe)
1858 {
1859 	/*
1860 	 * amdgpu controls only the first MEC. That's why this function only
1861 	 * handles the setting of interrupts for this specific MEC. All other
1862 	 * pipes' interrupts are set by amdkfd.
1863 	 */
1864 	if (me != 1)
1865 		return 0;
1866 
1867 	switch (pipe) {
1868 	case 0:
1869 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
1870 	case 1:
1871 		return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
1872 	default:
1873 		return 0;
1874 	}
1875 }
1876 
1877 static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1878 					       bool enable)
1879 {
1880 	u32 tmp, cp_int_cntl_reg;
1881 	int i, j;
1882 
1883 	if (amdgpu_sriov_vf(adev))
1884 		return;
1885 
1886 	for (i = 0; i < adev->gfx.me.num_me; i++) {
1887 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
1888 			cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
1889 
1890 			if (cp_int_cntl_reg) {
1891 				tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
1892 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1893 						    enable ? 1 : 0);
1894 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1895 						    enable ? 1 : 0);
1896 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1897 						    enable ? 1 : 0);
1898 				tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1899 						    enable ? 1 : 0);
1900 				WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
1901 			}
1902 		}
1903 	}
1904 }
1905 
1906 static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
1907 {
1908 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1909 
1910 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
1911 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
1912 	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
1913 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1914 	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1915 
1916 	return 0;
1917 }
1918 
1919 static void gfx_v12_0_rlc_stop(struct amdgpu_device *adev)
1920 {
1921 	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
1922 
1923 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1924 	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
1925 }
1926 
1927 static void gfx_v12_0_rlc_reset(struct amdgpu_device *adev)
1928 {
1929 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1930 	udelay(50);
1931 	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1932 	udelay(50);
1933 }
1934 
1935 static void gfx_v12_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1936 					     bool enable)
1937 {
1938 	uint32_t rlc_pg_cntl;
1939 
1940 	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
1941 
1942 	if (!enable) {
1943 		/* RLC_PG_CNTL[23] = 0 (default)
1944 		 * RLC will wait for handshake acks with SMU
1945 		 * GFXOFF will be enabled
1946 		 * RLC_PG_CNTL[23] = 1
1947 		 * RLC will not issue any message to SMU
1948 		 * hence no handshake between SMU & RLC
1949 		 * GFXOFF will be disabled
1950 		 */
1951 		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1952 	} else
1953 		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
1954 	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
1955 }
1956 
1957 static void gfx_v12_0_rlc_start(struct amdgpu_device *adev)
1958 {
1959 	/* TODO: re-enable the rlc & smu handshake once the smu
1960 	 * and gfxoff features work as expected */
1961 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1962 		gfx_v12_0_rlc_smu_handshake_cntl(adev, false);
1963 
1964 	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1965 	udelay(50);
1966 }
1967 
1968 static void gfx_v12_0_rlc_enable_srm(struct amdgpu_device *adev)
1969 {
1970 	uint32_t tmp;
1971 
1972 	/* enable Save Restore Machine */
1973 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
1974 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1975 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1976 	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
1977 }
1978 
1979 static void gfx_v12_0_load_rlcg_microcode(struct amdgpu_device *adev)
1980 {
1981 	const struct rlc_firmware_header_v2_0 *hdr;
1982 	const __le32 *fw_data;
1983 	unsigned i, fw_size;
1984 
1985 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1986 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1987 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1988 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
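	/* stream the GPM ucode one dword at a time through the
	 * UCODE_ADDR/UCODE_DATA pair; the trailing ADDR write leaves the
	 * firmware version behind, presumably for bookkeeping
	 */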
1989 
1990 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
1991 		     RLCG_UCODE_LOADING_START_ADDRESS);
1992 
1993 	for (i = 0; i < fw_size; i++)
1994 		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
1995 			     le32_to_cpup(fw_data++));
1996 
1997 	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1998 }
1999 
2000 static void gfx_v12_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2001 {
2002 	const struct rlc_firmware_header_v2_2 *hdr;
2003 	const __le32 *fw_data;
2004 	unsigned i, fw_size;
2005 	u32 tmp;
2006 
2007 	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2008 
2009 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2010 			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2011 	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2012 
2013 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2014 
2015 	for (i = 0; i < fw_size; i++) {
2016 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2017 			msleep(1);
2018 		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2019 				le32_to_cpup(fw_data++));
2020 	}
2021 
2022 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2023 
2024 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2025 			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2026 	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2027 
2028 	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2029 	for (i = 0; i < fw_size; i++) {
2030 		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2031 			msleep(1);
2032 		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2033 				le32_to_cpup(fw_data++));
2034 	}
2035 
2036 	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2037 
2038 	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2039 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2040 	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2041 	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2042 }
2043 
2044 static int gfx_v12_0_rlc_load_microcode(struct amdgpu_device *adev)
2045 {
2046 	const struct rlc_firmware_header_v2_0 *hdr;
2047 	uint16_t version_major;
2048 	uint16_t version_minor;
2049 
2050 	if (!adev->gfx.rlc_fw)
2051 		return -EINVAL;
2052 
2053 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2054 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2055 
2056 	version_major = le16_to_cpu(hdr->header.header_version_major);
2057 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
2058 
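	/* a v2.0 header carries the GPM ucode; a v2.2 header additionally
	 * carries the LX6 IRAM/DRAM images loaded below
	 */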
2059 	if (version_major == 2) {
2060 		gfx_v12_0_load_rlcg_microcode(adev);
2061 		if (amdgpu_dpm == 1) {
2062 			if (version_minor >= 2)
2063 				gfx_v12_0_load_rlc_iram_dram_microcode(adev);
2064 		}
2065 
2066 		return 0;
2067 	}
2068 
2069 	return -EINVAL;
2070 }
2071 
2072 static int gfx_v12_0_rlc_resume(struct amdgpu_device *adev)
2073 {
2074 	int r;
2075 
2076 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2077 		gfx_v12_0_init_csb(adev);
2078 
2079 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2080 			gfx_v12_0_rlc_enable_srm(adev);
2081 	} else {
2082 		if (amdgpu_sriov_vf(adev)) {
2083 			gfx_v12_0_init_csb(adev);
2084 			return 0;
2085 		}
2086 
2087 		adev->gfx.rlc.funcs->stop(adev);
2088 
2089 		/* disable CG */
2090 		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2091 
2092 		/* disable PG */
2093 		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2094 
2095 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2096 			/* legacy rlc firmware loading */
2097 			r = gfx_v12_0_rlc_load_microcode(adev);
2098 			if (r)
2099 				return r;
2100 		}
2101 
2102 		gfx_v12_0_init_csb(adev);
2103 
2104 		adev->gfx.rlc.funcs->start(adev);
2105 	}
2106 
2107 	return 0;
2108 }
2109 
2110 static void gfx_v12_0_config_gfx_rs64(struct amdgpu_device *adev)
2111 {
2112 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2113 	const struct gfx_firmware_header_v2_0 *me_hdr;
2114 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2115 	uint32_t pipe_id, tmp;
2116 
2117 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2118 		adev->gfx.mec_fw->data;
2119 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2120 		adev->gfx.me_fw->data;
2121 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2122 		adev->gfx.pfp_fw->data;
2123 
2124 	/* config pfp program start addr */
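	/* the start address is programmed in dword units: the low register
	 * takes byte-address bits [33:2] (hence the >> 2 / << 30 packing),
	 * the _HI register the remaining upper bits
	 */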
2125 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2126 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2127 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2128 			(pfp_hdr->ucode_start_addr_hi << 30) |
2129 			(pfp_hdr->ucode_start_addr_lo >> 2));
2130 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2131 			pfp_hdr->ucode_start_addr_hi >> 2);
2132 	}
2133 	soc24_grbm_select(adev, 0, 0, 0, 0);
2134 
2135 	/* reset pfp pipe */
2136 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2137 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2138 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2139 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2140 
2141 	/* clear pfp pipe reset */
2142 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2143 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2144 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2145 
2146 	/* config me program start addr */
2147 	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2148 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2149 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2150 			(me_hdr->ucode_start_addr_hi << 30) |
2151 			(me_hdr->ucode_start_addr_lo >> 2));
2152 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2153 			me_hdr->ucode_start_addr_hi >> 2);
2154 	}
2155 	soc24_grbm_select(adev, 0, 0, 0, 0);
2156 
2157 	/* reset me pipe */
2158 	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2159 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2160 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2161 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2162 
2163 	/* clear me pipe reset */
2164 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2165 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2166 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2167 
2168 	/* config mec program start addr */
2169 	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2170 		soc24_grbm_select(adev, 1, pipe_id, 0, 0);
2171 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2172 					mec_hdr->ucode_start_addr_lo >> 2 |
2173 					mec_hdr->ucode_start_addr_hi << 30);
2174 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2175 					mec_hdr->ucode_start_addr_hi >> 2);
2176 	}
2177 	soc24_grbm_select(adev, 0, 0, 0, 0);
2178 
2179 	/* reset mec pipe */
2180 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2181 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2182 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2183 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2184 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2185 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2186 
2187 	/* clear mec pipe reset */
2188 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2189 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2190 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2191 	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2192 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2193 }
2194 
2195 static void gfx_v12_0_set_pfp_ucode_start_addr(struct amdgpu_device *adev)
2196 {
2197 	const struct gfx_firmware_header_v2_0 *cp_hdr;
2198 	unsigned pipe_id, tmp;
2199 
2200 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
2201 		adev->gfx.pfp_fw->data;
2202 	mutex_lock(&adev->srbm_mutex);
2203 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2204 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2205 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2206 			     (cp_hdr->ucode_start_addr_hi << 30) |
2207 			     (cp_hdr->ucode_start_addr_lo >> 2));
2208 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2209 			     cp_hdr->ucode_start_addr_hi >> 2);
2210 
2211 		/*
2212 		 * Program CP_ME_CNTL to reset given PIPE to take
2213 		 * effect of CP_PFP_PRGRM_CNTR_START.
2214 		 */
2215 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2216 		if (pipe_id == 0)
2217 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2218 					PFP_PIPE0_RESET, 1);
2219 		else
2220 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2221 					PFP_PIPE1_RESET, 1);
2222 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2223 
2224 		/* Clear the pfp pipe reset bit. */
2225 		if (pipe_id == 0)
2226 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2227 					PFP_PIPE0_RESET, 0);
2228 		else
2229 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2230 					PFP_PIPE1_RESET, 0);
2231 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2232 	}
2233 	soc24_grbm_select(adev, 0, 0, 0, 0);
2234 	mutex_unlock(&adev->srbm_mutex);
2235 }
2236 
2237 static void gfx_v12_0_set_me_ucode_start_addr(struct amdgpu_device *adev)
2238 {
2239 	const struct gfx_firmware_header_v2_0 *cp_hdr;
2240 	unsigned pipe_id, tmp;
2241 
2242 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
2243 		adev->gfx.me_fw->data;
2244 	mutex_lock(&adev->srbm_mutex);
2245 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2246 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2247 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2248 			     (cp_hdr->ucode_start_addr_hi << 30) |
2249 			     (cp_hdr->ucode_start_addr_lo >> 2));
2250 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2251 			     cp_hdr->ucode_start_addr_hi >> 2);
2252 
2253 		/*
2254 		 * Program CP_ME_CNTL to reset given PIPE to take
2255 		 * effect of CP_ME_PRGRM_CNTR_START.
2256 		 */
2257 		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2258 		if (pipe_id == 0)
2259 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2260 					ME_PIPE0_RESET, 1);
2261 		else
2262 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2263 					ME_PIPE1_RESET, 1);
2264 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2265 
2266 		/* Clear the me pipe reset bit. */
2267 		if (pipe_id == 0)
2268 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2269 					ME_PIPE0_RESET, 0);
2270 		else
2271 			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2272 					ME_PIPE1_RESET, 0);
2273 		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2274 	}
2275 	soc24_grbm_select(adev, 0, 0, 0, 0);
2276 	mutex_unlock(&adev->srbm_mutex);
2277 }
2278 
2279 static void gfx_v12_0_set_mec_ucode_start_addr(struct amdgpu_device *adev)
2280 {
2281 	const struct gfx_firmware_header_v2_0 *cp_hdr;
2282 	unsigned pipe_id;
2283 
2284 	cp_hdr = (const struct gfx_firmware_header_v2_0 *)
2285 		adev->gfx.mec_fw->data;
2286 	mutex_lock(&adev->srbm_mutex);
2287 	for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) {
2288 		soc24_grbm_select(adev, 1, pipe_id, 0, 0);
2289 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2290 			     cp_hdr->ucode_start_addr_lo >> 2 |
2291 			     cp_hdr->ucode_start_addr_hi << 30);
2292 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2293 			     cp_hdr->ucode_start_addr_hi >> 2);
2294 	}
2295 	soc24_grbm_select(adev, 0, 0, 0, 0);
2296 	mutex_unlock(&adev->srbm_mutex);
2297 }
2298 
2299 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2300 {
2301 	uint32_t cp_status;
2302 	uint32_t bootload_status;
2303 	int i;
2304 
2305 	for (i = 0; i < adev->usec_timeout; i++) {
2306 		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2307 		bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2308 
2309 		if ((cp_status == 0) &&
2310 		    (REG_GET_FIELD(bootload_status,
2311 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2312 			break;
2313 		}
2314 		udelay(1);
2315 		if (amdgpu_emu_mode)
2316 			msleep(10);
2317 	}
2318 
2319 	if (i >= adev->usec_timeout) {
2320 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2321 		return -ETIMEDOUT;
2322 	}
2323 
2324 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2325 		gfx_v12_0_set_pfp_ucode_start_addr(adev);
2326 		gfx_v12_0_set_me_ucode_start_addr(adev);
2327 		gfx_v12_0_set_mec_ucode_start_addr(adev);
2328 	}
2329 
2330 	return 0;
2331 }
2332 
2333 static int gfx_v12_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2334 {
2335 	int i;
2336 	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2337 
2338 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2339 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2340 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2341 
2342 	for (i = 0; i < adev->usec_timeout; i++) {
2343 		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2344 			break;
2345 		udelay(1);
2346 	}
2347 
2348 	if (i >= adev->usec_timeout)
2349 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2350 
2351 	return 0;
2352 }
2353 
2354 static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2355 {
2356 	int r;
2357 	const struct gfx_firmware_header_v2_0 *pfp_hdr;
2358 	const __le32 *fw_ucode, *fw_data;
2359 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2360 	uint32_t tmp;
2361 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2362 
2363 	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2364 		adev->gfx.pfp_fw->data;
2365 
2366 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2367 
2368 	/* instruction */
2369 	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2370 		le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2371 	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2372 	/* data */
2373 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2374 		le32_to_cpu(pfp_hdr->data_offset_bytes));
2375 	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2376 
2377 	/* 64kb align */
2378 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2379 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2380 				      &adev->gfx.pfp.pfp_fw_obj,
2381 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
2382 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
2383 	if (r) {
2384 		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2385 		gfx_v12_0_pfp_fini(adev);
2386 		return r;
2387 	}
2388 
2389 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
2390 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2391 				      &adev->gfx.pfp.pfp_fw_data_obj,
2392 				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2393 				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2394 	if (r) {
2395 		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2396 		gfx_v12_0_pfp_fini(adev);
2397 		return r;
2398 	}
2399 
2400 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2401 	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2402 
2403 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2404 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2405 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2406 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
2407 
2408 	if (amdgpu_emu_mode == 1)
2409 		amdgpu_device_flush_hdp(adev, NULL);
2410 
2411 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2412 		lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2413 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2414 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2415 
2416 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2417 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2418 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2419 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2420 	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2421 
2422 	/*
2423 	 * Programming any of the CP_PFP_IC_BASE registers
2424 	 * forces invalidation of the PFP L1 I$. Wait for the
2425 	 * invalidation to complete.
2426 	 */
2427 	for (i = 0; i < usec_timeout; i++) {
2428 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2429 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2430 			INVALIDATE_CACHE_COMPLETE))
2431 			break;
2432 		udelay(1);
2433 	}
2434 
2435 	if (i >= usec_timeout) {
2436 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2437 		return -EINVAL;
2438 	}
2439 
2440 	/* Prime the L1 instruction caches */
2441 	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2442 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2443 	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2444 	/* Wait for the instruction cache to be primed */
2445 	for (i = 0; i < usec_timeout; i++) {
2446 		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2447 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2448 			ICACHE_PRIMED))
2449 			break;
2450 		udelay(1);
2451 	}
2452 
2453 	if (i >= usec_timeout) {
2454 		dev_err(adev->dev, "failed to prime instruction cache\n");
2455 		return -EINVAL;
2456 	}
2457 
2458 	mutex_lock(&adev->srbm_mutex);
2459 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2460 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2461 
2462 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2463 			lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2464 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2465 			upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2466 	}
2467 	soc24_grbm_select(adev, 0, 0, 0, 0);
2468 	mutex_unlock(&adev->srbm_mutex);
2469 
2470 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2471 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2472 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2473 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2474 
2475 	/* Invalidate the data caches */
2476 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2477 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2478 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2479 
2480 	for (i = 0; i < usec_timeout; i++) {
2481 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2482 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2483 			INVALIDATE_DCACHE_COMPLETE))
2484 			break;
2485 		udelay(1);
2486 	}
2487 
2488 	if (i >= usec_timeout) {
2489 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2490 		return -EINVAL;
2491 	}
2492 
2493 	gfx_v12_0_set_pfp_ucode_start_addr(adev);
2494 
2495 	return 0;
2496 }
2497 
2498 static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
2499 {
2500 	int r;
2501 	const struct gfx_firmware_header_v2_0 *me_hdr;
2502 	const __le32 *fw_ucode, *fw_data;
2503 	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2504 	uint32_t tmp;
2505 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2506 
2507 	me_hdr = (const struct gfx_firmware_header_v2_0 *)
2508 		adev->gfx.me_fw->data;
2509 
2510 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2511 
2512 	/* instruction */
2513 	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
2514 		le32_to_cpu(me_hdr->ucode_offset_bytes));
2515 	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
2516 	/* data */
2517 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2518 		le32_to_cpu(me_hdr->data_offset_bytes));
2519 	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
2520 
2521 	/* 64kb align */
2522 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2523 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2524 				      &adev->gfx.me.me_fw_obj,
2525 				      &adev->gfx.me.me_fw_gpu_addr,
2526 				      (void **)&adev->gfx.me.me_fw_ptr);
2527 	if (r) {
2528 		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
2529 		gfx_v12_0_me_fini(adev);
2530 		return r;
2531 	}
2532 
2533 	r = amdgpu_bo_create_reserved(adev, fw_data_size,
2534 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2535 				      &adev->gfx.me.me_fw_data_obj,
2536 				      &adev->gfx.me.me_fw_data_gpu_addr,
2537 				      (void **)&adev->gfx.me.me_fw_data_ptr);
2538 	if (r) {
2539 		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
2540 		gfx_v12_0_me_fini(adev);
2541 		return r;
2542 	}
2543 
2544 	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
2545 	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
2546 
2547 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2548 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
2549 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2550 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
2551 
2552 	if (amdgpu_emu_mode == 1)
2553 		amdgpu_device_flush_hdp(adev, NULL);
2554 
2555 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2556 		lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
2557 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2558 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2559 
2560 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2561 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2562 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2563 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2564 	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2565 
2566 	/*
2567 	 * Programming any of the CP_ME_IC_BASE registers
2568 	 * forces invalidation of the ME L1 I$. Wait for the
2569 	 * invalidation to complete.
2570 	 */
2571 	for (i = 0; i < usec_timeout; i++) {
2572 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2573 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2574 			INVALIDATE_CACHE_COMPLETE))
2575 			break;
2576 		udelay(1);
2577 	}
2578 
2579 	if (i >= usec_timeout) {
2580 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2581 		return -EINVAL;
2582 	}
2583 
2584 	/* Prime the instruction caches */
2585 	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2586 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2587 	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2588 
2589 	/* Wait for the instruction cache to be primed */
2590 	for (i = 0; i < usec_timeout; i++) {
2591 		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2592 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2593 			ICACHE_PRIMED))
2594 			break;
2595 		udelay(1);
2596 	}
2597 
2598 	if (i >= usec_timeout) {
2599 		dev_err(adev->dev, "failed to prime instruction cache\n");
2600 		return -EINVAL;
2601 	}
2602 
2603 	mutex_lock(&adev->srbm_mutex);
2604 	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2605 		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
2606 
2607 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2608 			lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
2609 		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2610 			upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
2611 	}
2612 	soc24_grbm_select(adev, 0, 0, 0, 0);
2613 	mutex_unlock(&adev->srbm_mutex);
2614 
2615 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2616 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2617 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2618 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2619 
2620 	/* Invalidate the data caches */
2621 	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2622 	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2623 	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2624 
2625 	for (i = 0; i < usec_timeout; i++) {
2626 		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2627 		if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2628 			INVALIDATE_DCACHE_COMPLETE))
2629 			break;
2630 		udelay(1);
2631 	}
2632 
2633 	if (i >= usec_timeout) {
2634 		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2635 		return -EINVAL;
2636 	}
2637 
2638 	gfx_v12_0_set_me_ucode_start_addr(adev);
2639 
2640 	return 0;
2641 }
2642 
2643 static int gfx_v12_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2644 {
2645 	int r;
2646 
2647 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
2648 		return -EINVAL;
2649 
2650 	gfx_v12_0_cp_gfx_enable(adev, false);
2651 
2652 	r = gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(adev);
2653 	if (r) {
2654 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
2655 		return r;
2656 	}
2657 
2658 	r = gfx_v12_0_cp_gfx_load_me_microcode_rs64(adev);
2659 	if (r) {
2660 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
2661 		return r;
2662 	}
2663 
2664 	return 0;
2665 }
2666 
2667 static int gfx_v12_0_cp_gfx_start(struct amdgpu_device *adev)
2668 {
2669 	/* init the CP */
2670 	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
2671 		     adev->gfx.config.max_hw_contexts - 1);
2672 	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
2673 
2674 	if (!amdgpu_async_gfx_ring)
2675 		gfx_v12_0_cp_gfx_enable(adev, true);
2676 
2677 	return 0;
2678 }
2679 
2680 static void gfx_v12_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2681 					 CP_PIPE_ID pipe)
2682 {
2683 	u32 tmp;
2684 
2685 	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
2686 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2687 
2688 	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
2689 }
2690 
2691 static void gfx_v12_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2692 					  struct amdgpu_ring *ring)
2693 {
2694 	u32 tmp;
2695 
2696 	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
2697 	if (ring->use_doorbell) {
2698 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2699 				    DOORBELL_OFFSET, ring->doorbell_index);
2700 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2701 				    DOORBELL_EN, 1);
2702 	} else {
2703 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2704 				    DOORBELL_EN, 0);
2705 	}
2706 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
2707 
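	/* bound the doorbell aperture the CP monitors for this ring: the
	 * lower bound starts at the ring's doorbell, the upper bound is
	 * left at the register's full-range mask
	 */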
2708 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2709 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
2710 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
2711 
2712 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
2713 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2714 }
2715 
2716 static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
2717 {
2718 	struct amdgpu_ring *ring;
2719 	u32 tmp;
2720 	u32 rb_bufsz;
2721 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
2722 
2723 	/* Set the write pointer delay */
2724 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
2725 
2726 	/* set the RB to use vmid 0 */
2727 	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
2728 
2729 	/* Init gfx ring 0 for pipe 0 */
2730 	mutex_lock(&adev->srbm_mutex);
2731 	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2732 
2733 	/* Set ring buffer size */
2734 	ring = &adev->gfx.gfx_ring[0];
2735 	rb_bufsz = order_base_2(ring->ring_size / 8);
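	/* the CP sizes the ring as 2^(RB_BUFSZ + 1) dwords, hence
	 * log2(ring_size in bytes / 8)
	 */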
2736 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2737 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2738 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
2739 
2740 	/* Initialize the ring buffer's write pointers */
2741 	ring->wptr = 0;
2742 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
2743 	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2744 
2745 	/* set the wb address whether it's enabled or not */
2746 	rptr_addr = ring->rptr_gpu_addr;
2747 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2748 	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2749 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2750 
2751 	wptr_gpu_addr = ring->wptr_gpu_addr;
2752 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
2753 		     lower_32_bits(wptr_gpu_addr));
2754 	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
2755 		     upper_32_bits(wptr_gpu_addr));
2756 
2757 	mdelay(1);
2758 	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
2759 
2760 	rb_addr = ring->gpu_addr >> 8;
2761 	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
2762 	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2763 
2764 	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
2765 
2766 	gfx_v12_0_cp_gfx_set_doorbell(adev, ring);
2767 	mutex_unlock(&adev->srbm_mutex);
2768 
2769 	/* Switch to pipe 0 */
2770 	mutex_lock(&adev->srbm_mutex);
2771 	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2772 	mutex_unlock(&adev->srbm_mutex);
2773 
2774 	/* start the ring */
2775 	gfx_v12_0_cp_gfx_start(adev);
2776 	return 0;
2777 }
2778 
2779 static void gfx_v12_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2780 {
2781 	u32 data;
2782 
2783 	data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2784 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
2785 						 enable ? 0 : 1);
2786 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
2787 						 enable ? 0 : 1);
2788 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
2789 						 enable ? 0 : 1);
2790 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
2791 						 enable ? 0 : 1);
2792 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
2793 						 enable ? 0 : 1);
2794 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
2795 						 enable ? 1 : 0);
2796 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
2797 						 enable ? 1 : 0);
2798 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
2799 						 enable ? 1 : 0);
2800 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
2801 						 enable ? 1 : 0);
2802 	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
2803 						 enable ? 0 : 1);
2804 	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
2805 
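	/* the KIQ is a compute queue running on the MEC, so its ring can
	 * only be scheduled while the MEC pipes are active
	 */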
2806 	adev->gfx.kiq[0].ring.sched.ready = enable;
2807 
2808 	udelay(50);
2809 }
2810 
2811 static int gfx_v12_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
2812 {
2813 	const struct gfx_firmware_header_v2_0 *mec_hdr;
2814 	const __le32 *fw_ucode, *fw_data;
2815 	u32 tmp, fw_ucode_size, fw_data_size;
2816 	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
2817 	u32 *fw_ucode_ptr, *fw_data_ptr;
2818 	int r;
2819 
2820 	if (!adev->gfx.mec_fw)
2821 		return -EINVAL;
2822 
2823 	gfx_v12_0_cp_compute_enable(adev, false);
2824 
2825 	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
2826 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2827 
2828 	fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
2829 				le32_to_cpu(mec_hdr->ucode_offset_bytes));
2830 	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
2831 
2832 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
2833 				le32_to_cpu(mec_hdr->data_offset_bytes));
2834 	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
2835 
2836 	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2837 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2838 				      &adev->gfx.mec.mec_fw_obj,
2839 				      &adev->gfx.mec.mec_fw_gpu_addr,
2840 				      (void **)&fw_ucode_ptr);
2841 	if (r) {
2842 		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
2843 		gfx_v12_0_mec_fini(adev);
2844 		return r;
2845 	}
2846 
2847 	r = amdgpu_bo_create_reserved(adev,
2848 				      ALIGN(fw_data_size, 64 * 1024) *
2849 				      adev->gfx.mec.num_pipe_per_mec,
2850 				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2851 				      &adev->gfx.mec.mec_fw_data_obj,
2852 				      &adev->gfx.mec.mec_fw_data_gpu_addr,
2853 				      (void **)&fw_data_ptr);
2854 	if (r) {
2855 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
2856 		gfx_v12_0_mec_fini(adev);
2857 		return r;
2858 	}
2859 
2860 	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
2861 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2862 		memcpy(fw_data_ptr + i * ALIGN(fw_data_size, 64 * 1024) / 4, fw_data, fw_data_size);
2863 	}
2864 
2865 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
2866 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
2867 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
2868 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
2869 
2870 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2871 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2872 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2873 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2874 	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2875 
2876 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2877 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2878 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2879 	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2880 
2881 	mutex_lock(&adev->srbm_mutex);
2882 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2883 		soc24_grbm_select(adev, 1, i, 0, 0);
2884 
2885 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO,
2886 			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2887 					   i * ALIGN(fw_data_size, 64 * 1024)));
2888 		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2889 			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr +
2890 					   i * ALIGN(fw_data_size, 64 * 1024)));
2891 
2892 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2893 			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2894 		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2895 			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2896 	}
2897 	soc24_grbm_select(adev, 0, 0, 0, 0);
2898 	mutex_unlock(&adev->srbm_mutex);
2899 
2900 	/* Trigger an invalidation of the MEC data caches */
2901 	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2902 	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2903 	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2904 
2905 	/* Wait for invalidation complete */
2906 	for (i = 0; i < usec_timeout; i++) {
2907 		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2908 		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2909 				       INVALIDATE_DCACHE_COMPLETE))
2910 			break;
2911 		udelay(1);
2912 	}
2913 
2914 	if (i >= usec_timeout) {
2915 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2916 		return -EINVAL;
2917 	}
2918 
2919 	/* Trigger an invalidation of the L1 instruction caches */
2920 	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2921 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2922 	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2923 
2924 	/* Wait for invalidation complete */
2925 	for (i = 0; i < usec_timeout; i++) {
2926 		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2927 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2928 				       INVALIDATE_CACHE_COMPLETE))
2929 			break;
2930 		udelay(1);
2931 	}
2932 
2933 	if (i >= usec_timeout) {
2934 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2935 		return -EINVAL;
2936 	}
2937 
2938 	gfx_v12_0_set_mec_ucode_start_addr(adev);
2939 
2940 	return 0;
2941 }
2942 
2943 static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
2944 {
2945 	uint32_t tmp;
2946 	struct amdgpu_device *adev = ring->adev;
2947 
2948 	/* tell RLC which queue is the KIQ */
2949 	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
2950 	tmp &= 0xffffff00;
2951 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
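	/* the low byte encodes the KIQ's me/pipe/queue; the 0x80 OR'ed in
	 * below appears to mark the entry as valid
	 */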
2952 	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
2953 }
2954 
2955 static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
2956 {
2957 	/* set graphics engine doorbell range */
2958 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
2959 		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
2960 	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
2961 		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
2962 
2963 	/* set compute engine doorbell range */
2964 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
2965 		     (adev->doorbell_index.kiq * 2) << 2);
2966 	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
2967 		     (adev->doorbell_index.userqueue_end * 2) << 2);
2968 }
2969 
2970 static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
2971 				  struct amdgpu_mqd_prop *prop)
2972 {
2973 	struct v12_gfx_mqd *mqd = m;
2974 	uint64_t hqd_gpu_addr, wb_gpu_addr;
2975 	uint32_t tmp;
2976 	uint32_t rb_bufsz;
2977 
2978 	/* set up gfx hqd wptr */
2979 	mqd->cp_gfx_hqd_wptr = 0;
2980 	mqd->cp_gfx_hqd_wptr_hi = 0;
2981 
2982 	/* set the pointer to the MQD */
2983 	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
2984 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
2985 
2986 	/* set up mqd control */
2987 	tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
2988 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
2989 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
2990 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
2991 	mqd->cp_gfx_mqd_control = tmp;
2992 
2993 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
2994 	tmp = regCP_GFX_HQD_VMID_DEFAULT;
2995 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
2996 	mqd->cp_gfx_hqd_vmid = tmp;
2997 
2998 	/* set up default queue priority level
2999 	 * 0x0 = low priority, 0x1 = high priority */
3000 	tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
3001 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3002 	mqd->cp_gfx_hqd_queue_priority = tmp;
3003 
3004 	/* set up time quantum */
3005 	tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
3006 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3007 	mqd->cp_gfx_hqd_quantum = tmp;
3008 
3009 	/* set up gfx hqd base, similar to CP_RB_BASE */
3010 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3011 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3012 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3013 
3014 	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3015 	wb_gpu_addr = prop->rptr_gpu_addr;
3016 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3017 	mqd->cp_gfx_hqd_rptr_addr_hi =
3018 		upper_32_bits(wb_gpu_addr) & 0xffff;
3019 
3020 	/* set up rb_wptr_poll addr */
3021 	wb_gpu_addr = prop->wptr_gpu_addr;
3022 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3023 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3024 
3025 	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3026 	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3027 	tmp = regCP_GFX_HQD_CNTL_DEFAULT;
3028 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3029 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3030 #ifdef __BIG_ENDIAN
3031 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3032 #endif
3033 	if (prop->tmz_queue)
3034 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
3035 	if (!prop->kernel_queue)
3036 		tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
3037 	mqd->cp_gfx_hqd_cntl = tmp;
3038 
3039 	/* set up cp_doorbell_control */
3040 	tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
3041 	if (prop->use_doorbell) {
3042 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3043 				    DOORBELL_OFFSET, prop->doorbell_index);
3044 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3045 				    DOORBELL_EN, 1);
3046 	} else
3047 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3048 				    DOORBELL_EN, 0);
3049 	mqd->cp_rb_doorbell_control = tmp;
3050 
3051 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3052 	mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
3053 
3054 	/* activate the queue */
3055 	mqd->cp_gfx_hqd_active = 1;
3056 
3057 	/* set gfx user queue (UQ) items */
3058 	mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
3059 	mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
3060 	mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
3061 	mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
3062 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
3063 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
3064 
3065 	return 0;
3066 }
3067 
3068 static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
3069 {
3070 	struct amdgpu_device *adev = ring->adev;
3071 	struct v12_gfx_mqd *mqd = ring->mqd_ptr;
3072 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3073 
3074 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
3075 		memset((void *)mqd, 0, sizeof(*mqd));
3076 		mutex_lock(&adev->srbm_mutex);
3077 		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3078 		amdgpu_ring_init_mqd(ring);
3079 		soc24_grbm_select(adev, 0, 0, 0, 0);
3080 		mutex_unlock(&adev->srbm_mutex);
3081 		if (adev->gfx.me.mqd_backup[mqd_idx])
3082 			memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3083 	} else {
3084 		/* restore mqd with the backup copy */
3085 		if (adev->gfx.me.mqd_backup[mqd_idx])
3086 			memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3087 		/* reset the ring */
3088 		ring->wptr = 0;
3089 		*ring->wptr_cpu_addr = 0;
3090 		amdgpu_ring_clear_ring(ring);
3091 	}
3092 
3093 	return 0;
3094 }
3095 
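/*
 * Bring up the kernel graphics queues (KGQs): initialize each ring's
 * MQD, have the KIQ/MES map the queues, then start the gfx engine with
 * gfx_v12_0_cp_gfx_start().
 */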
3096 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3097 {
3098 	int i, r;
3099 
3100 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3101 		r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false);
3102 		if (r)
3103 			return r;
3104 	}
3105 
3106 	r = amdgpu_gfx_enable_kgq(adev, 0);
3107 	if (r)
3108 		return r;
3109 
3110 	return gfx_v12_0_cp_gfx_start(adev);
3111 }
3112 
3113 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3114 				      struct amdgpu_mqd_prop *prop)
3115 {
3116 	struct v12_compute_mqd *mqd = m;
3117 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3118 	uint32_t tmp;
3119 
3120 	mqd->header = 0xC0310800;
3121 	mqd->compute_pipelinestat_enable = 0x00000001;
3122 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3123 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3124 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3125 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3126 	mqd->compute_misc_reserved = 0x00000007;
3127 
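	/* the EOP buffer base is programmed in 256-byte units, hence the >> 8 */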
3128 	eop_base_addr = prop->eop_gpu_addr >> 8;
3129 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3130 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3131 
3132 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3133 	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
3134 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3135 			(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
3136 
3137 	mqd->cp_hqd_eop_control = tmp;
3138 
3139 	/* enable doorbell? */
3140 	tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
3141 
3142 	if (prop->use_doorbell) {
3143 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3144 				    DOORBELL_OFFSET, prop->doorbell_index);
3145 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3146 				    DOORBELL_EN, 1);
3147 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3148 				    DOORBELL_SOURCE, 0);
3149 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3150 				    DOORBELL_HIT, 0);
3151 	} else {
3152 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3153 				    DOORBELL_EN, 0);
3154 	}
3155 
3156 	mqd->cp_hqd_pq_doorbell_control = tmp;
3157 
3158 	/* disable the queue if it's active */
3159 	mqd->cp_hqd_dequeue_request = 0;
3160 	mqd->cp_hqd_pq_rptr = 0;
3161 	mqd->cp_hqd_pq_wptr_lo = 0;
3162 	mqd->cp_hqd_pq_wptr_hi = 0;
3163 
3164 	/* set the pointer to the MQD */
3165 	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3166 	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3167 
3168 	/* set MQD vmid to 0 */
3169 	tmp = regCP_MQD_CONTROL_DEFAULT;
3170 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3171 	mqd->cp_mqd_control = tmp;
3172 
3173 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3174 	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3175 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3176 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3177 
3178 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3179 	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
3180 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3181 			    (order_base_2(prop->queue_size / 4) - 1));
3182 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3183 			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3184 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
3185 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3186 	if (prop->kernel_queue) {
3187 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3188 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3189 	}
3190 	if (prop->tmz_queue)
3191 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
3192 	mqd->cp_hqd_pq_control = tmp;
3193 
3194 	/* set the wb address whether it's enabled or not */
3195 	wb_gpu_addr = prop->rptr_gpu_addr;
3196 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3197 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3198 		upper_32_bits(wb_gpu_addr) & 0xffff;
3199 
3200 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3201 	wb_gpu_addr = prop->wptr_gpu_addr;
3202 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3203 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3204 
3205 	tmp = 0;
3206 	/* enable the doorbell if requested */
3207 	if (prop->use_doorbell) {
3208 		tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
3209 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3210 				DOORBELL_OFFSET, prop->doorbell_index);
3211 
3212 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3213 				    DOORBELL_EN, 1);
3214 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3215 				    DOORBELL_SOURCE, 0);
3216 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3217 				    DOORBELL_HIT, 0);
3218 	}
3219 
3220 	mqd->cp_hqd_pq_doorbell_control = tmp;
3221 
3222 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3223 	mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
3224 
3225 	/* set the vmid for the queue */
3226 	mqd->cp_hqd_vmid = 0;
3227 
3228 	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
3229 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3230 	mqd->cp_hqd_persistent_state = tmp;
3231 
3232 	/* set MIN_IB_AVAIL_SIZE */
3233 	tmp = regCP_HQD_IB_CONTROL_DEFAULT;
3234 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3235 	mqd->cp_hqd_ib_control = tmp;
3236 
3237 	/* set static priority for a compute queue/ring */
3238 	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3239 	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
3240 
3241 	mqd->cp_hqd_active = prop->hqd_active;
3242 
3243 	/* set the UQ fence address */
3244 	mqd->fence_address_lo = lower_32_bits(prop->fence_address);
3245 	mqd->fence_address_hi = upper_32_bits(prop->fence_address);
3246 
3247 	return 0;
3248 }
3249 
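/*
 * The KIQ cannot map itself, so the driver programs its HQD registers
 * directly over MMIO from the prepared MQD, with the target queue
 * selected via soc24_grbm_select() by the caller.
 */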
3250 static int gfx_v12_0_kiq_init_register(struct amdgpu_ring *ring)
3251 {
3252 	struct amdgpu_device *adev = ring->adev;
3253 	struct v12_compute_mqd *mqd = ring->mqd_ptr;
3254 	int j;
3255 
3256 	/* deactivate the queue */
3257 	if (amdgpu_sriov_vf(adev))
3258 		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3259 
3260 	/* disable wptr polling */
3261 	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3262 
3263 	/* write the EOP addr */
3264 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3265 	       mqd->cp_hqd_eop_base_addr_lo);
3266 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3267 	       mqd->cp_hqd_eop_base_addr_hi);
3268 
3269 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3270 	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3271 	       mqd->cp_hqd_eop_control);
3272 
3273 	/* enable doorbell? */
3274 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3275 	       mqd->cp_hqd_pq_doorbell_control);
3276 
3277 	/* disable the queue if it's active */
3278 	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3279 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3280 		for (j = 0; j < adev->usec_timeout; j++) {
3281 			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
3282 				break;
3283 			udelay(1);
3284 		}
3285 		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3286 		       mqd->cp_hqd_dequeue_request);
3287 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3288 		       mqd->cp_hqd_pq_rptr);
3289 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3290 		       mqd->cp_hqd_pq_wptr_lo);
3291 		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3292 		       mqd->cp_hqd_pq_wptr_hi);
3293 	}
3294 
3295 	/* set the pointer to the MQD */
3296 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3297 	       mqd->cp_mqd_base_addr_lo);
3298 	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3299 	       mqd->cp_mqd_base_addr_hi);
3300 
3301 	/* set MQD vmid to 0 */
3302 	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3303 	       mqd->cp_mqd_control);
3304 
3305 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3306 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3307 	       mqd->cp_hqd_pq_base_lo);
3308 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3309 	       mqd->cp_hqd_pq_base_hi);
3310 
3311 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3312 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3313 	       mqd->cp_hqd_pq_control);
3314 
3315 	/* set the wb address whether it's enabled or not */
3316 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3317 		mqd->cp_hqd_pq_rptr_report_addr_lo);
3318 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3319 		mqd->cp_hqd_pq_rptr_report_addr_hi);
3320 
3321 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3322 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3323 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3324 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3325 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3326 
3327 	/* enable the doorbell if requested */
3328 	if (ring->use_doorbell) {
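		/*
		 * doorbell_index counts 64-bit doorbells: the * 2 converts
		 * it to 32-bit doorbell slots and the << 2 shifts it into
		 * the byte-offset form the range registers expect.
		 */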
3329 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3330 			(adev->doorbell_index.kiq * 2) << 2);
3331 		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3332 			(adev->doorbell_index.userqueue_end * 2) << 2);
3333 	}
3334 
3335 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3336 	       mqd->cp_hqd_pq_doorbell_control);
3337 
3338 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3339 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3340 	       mqd->cp_hqd_pq_wptr_lo);
3341 	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3342 	       mqd->cp_hqd_pq_wptr_hi);
3343 
3344 	/* set the vmid for the queue */
3345 	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3346 
3347 	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3348 	       mqd->cp_hqd_persistent_state);
3349 
3350 	/* activate the queue */
3351 	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
3352 	       mqd->cp_hqd_active);
3353 
3354 	if (ring->use_doorbell)
3355 		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3356 
3357 	return 0;
3358 }
3359 
3360 static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
3361 {
3362 	struct amdgpu_device *adev = ring->adev;
3363 	struct v12_compute_mqd *mqd = ring->mqd_ptr;
3364 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3365 
3366 	gfx_v12_0_kiq_setting(ring);
3367 
3368 	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3369 		/* reset MQD to a clean status */
3370 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3371 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3372 
3373 		/* reset ring buffer */
3374 		ring->wptr = 0;
3375 		amdgpu_ring_clear_ring(ring);
3376 
3377 		mutex_lock(&adev->srbm_mutex);
3378 		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3379 		gfx_v12_0_kiq_init_register(ring);
3380 		soc24_grbm_select(adev, 0, 0, 0, 0);
3381 		mutex_unlock(&adev->srbm_mutex);
3382 	} else {
3383 		memset((void *)mqd, 0, sizeof(*mqd));
3384 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3385 			amdgpu_ring_clear_ring(ring);
3386 		mutex_lock(&adev->srbm_mutex);
3387 		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3388 		amdgpu_ring_init_mqd(ring);
3389 		gfx_v12_0_kiq_init_register(ring);
3390 		soc24_grbm_select(adev, 0, 0, 0, 0);
3391 		mutex_unlock(&adev->srbm_mutex);
3392 
3393 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3394 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3395 	}
3396 
3397 	return 0;
3398 }
3399 
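/*
 * Note that the MQD may be backed by VRAM mapped as I/O memory, which
 * is why the backup/restore paths below use memcpy_fromio()/
 * memcpy_toio() instead of plain memcpy().
 */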
3400 static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
3401 {
3402 	struct amdgpu_device *adev = ring->adev;
3403 	struct v12_compute_mqd *mqd = ring->mqd_ptr;
3404 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3405 
3406 	if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
3407 		memset((void *)mqd, 0, sizeof(*mqd));
3408 		mutex_lock(&adev->srbm_mutex);
3409 		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3410 		amdgpu_ring_init_mqd(ring);
3411 		soc24_grbm_select(adev, 0, 0, 0, 0);
3412 		mutex_unlock(&adev->srbm_mutex);
3413 
3414 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3415 			memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3416 	} else {
3417 		/* restore MQD to a clean status */
3418 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3419 			memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3420 		/* reset ring buffer */
3421 		ring->wptr = 0;
3422 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3423 		amdgpu_ring_clear_ring(ring);
3424 	}
3425 
3426 	return 0;
3427 }
3428 
3429 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
3430 {
3431 	gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
3432 	adev->gfx.kiq[0].ring.sched.ready = true;
3433 	return 0;
3434 }
3435 
3436 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
3437 {
3438 	int i, r;
3439 
3440 	if (!amdgpu_async_gfx_ring)
3441 		gfx_v12_0_cp_compute_enable(adev, true);
3442 
3443 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3444 		r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
3445 		if (r)
3446 			return r;
3447 	}
3448 
3449 	return amdgpu_gfx_enable_kcq(adev, 0);
3450 }
3451 
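/*
 * CP bring-up order: load the CP microcode when the driver loads
 * firmware directly, program the doorbell apertures, bring up the KIQ
 * (or the MES KIQ), map the compute and gfx kernel queues, and finish
 * with a ring test on every ring.
 */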
3452 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
3453 {
3454 	int r, i;
3455 	struct amdgpu_ring *ring;
3456 
3457 	if (!(adev->flags & AMD_IS_APU))
3458 		gfx_v12_0_enable_gui_idle_interrupt(adev, false);
3459 
3460 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3461 		/* legacy firmware loading */
3462 		r = gfx_v12_0_cp_gfx_load_microcode(adev);
3463 		if (r)
3464 			return r;
3465 
3466 		r = gfx_v12_0_cp_compute_load_microcode_rs64(adev);
3467 		if (r)
3468 			return r;
3469 	}
3470 
3471 	gfx_v12_0_cp_set_doorbell_range(adev);
3472 
3473 	if (amdgpu_async_gfx_ring) {
3474 		gfx_v12_0_cp_compute_enable(adev, true);
3475 		gfx_v12_0_cp_gfx_enable(adev, true);
3476 	}
3477 
3478 	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
3479 		r = amdgpu_mes_kiq_hw_init(adev, 0);
3480 	else
3481 		r = gfx_v12_0_kiq_resume(adev);
3482 	if (r)
3483 		return r;
3484 
3485 	r = gfx_v12_0_kcq_resume(adev);
3486 	if (r)
3487 		return r;
3488 
3489 	if (!amdgpu_async_gfx_ring) {
3490 		r = gfx_v12_0_cp_gfx_resume(adev);
3491 		if (r)
3492 			return r;
3493 	} else {
3494 		r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
3495 		if (r)
3496 			return r;
3497 	}
3498 
3499 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3500 		ring = &adev->gfx.gfx_ring[i];
3501 		r = amdgpu_ring_test_helper(ring);
3502 		if (r)
3503 			return r;
3504 	}
3505 
3506 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3507 		ring = &adev->gfx.compute_ring[i];
3508 		r = amdgpu_ring_test_helper(ring);
3509 		if (r)
3510 			return r;
3511 	}
3512 
3513 	return 0;
3514 }
3515 
3516 static void gfx_v12_0_cp_enable(struct amdgpu_device *adev, bool enable)
3517 {
3518 	gfx_v12_0_cp_gfx_enable(adev, enable);
3519 	gfx_v12_0_cp_compute_enable(adev, enable);
3520 }
3521 
3522 static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
3523 {
3524 	int r;
3525 	bool value;
3526 
3527 	r = adev->gfxhub.funcs->gart_enable(adev);
3528 	if (r)
3529 		return r;
3530 
3531 	amdgpu_device_flush_hdp(adev, NULL);
3532 
3533 	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
3534 
3535 	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
3536 	/* TODO: investigate why this and the hdp flush above are needed;
3537 	 * are we missing a flush somewhere else? */
3538 	adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
3539 
3540 	return 0;
3541 }
3542 
3543 static int get_gb_addr_config(struct amdgpu_device *adev)
3544 {
3545 	u32 gb_addr_config;
3546 
3547 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
3548 	if (gb_addr_config == 0)
3549 		return -EINVAL;
3550 
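	/*
	 * GB_ADDR_CONFIG packs log2-encoded topology fields, so each one
	 * is decoded with a shift; e.g. NUM_PIPES = 3 means 1 << 3 = 8 pipes.
	 */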
3551 	adev->gfx.config.gb_addr_config_fields.num_pkrs =
3552 		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
3553 
3554 	adev->gfx.config.gb_addr_config = gb_addr_config;
3555 
3556 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
3557 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
3558 				      GB_ADDR_CONFIG, NUM_PIPES);
3559 
3560 	adev->gfx.config.max_tile_pipes =
3561 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3562 
3563 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
3564 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
3565 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
3566 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
3567 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
3568 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
3569 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
3570 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
3571 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
3572 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
3573 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
3574 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
3575 
3576 	return 0;
3577 }
3578 
3579 static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
3580 {
3581 	uint32_t data;
3582 
3583 	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
3584 	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
3585 	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
3586 
3587 	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
3588 	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
3589 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
3590 }
3591 
3592 static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
3593 {
3594 	if (amdgpu_sriov_vf(adev))
3595 		return;
3596 
3597 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3598 	case IP_VERSION(12, 0, 0):
3599 	case IP_VERSION(12, 0, 1):
3600 		soc15_program_register_sequence(adev,
3601 						golden_settings_gc_12_0,
3602 						(const u32)ARRAY_SIZE(golden_settings_gc_12_0));
3603 
3604 		if (adev->rev_id == 0)
3605 			soc15_program_register_sequence(adev,
3606 					golden_settings_gc_12_0_rev0,
3607 					(const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0));
3608 		break;
3609 	default:
3610 		break;
3611 	}
3612 }
3613 
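/*
 * hw_init brings the gfx block up: depending on the firmware load type
 * it either runs the RLC backdoor autoload sequence or loads the IMU
 * microcode directly, then waits for autoload completion, programs the
 * golden registers, enables the gfxhub, and resumes the RLC and CP.
 */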
3614 static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
3615 {
3616 	int r;
3617 	struct amdgpu_device *adev = ip_block->adev;
3618 
3619 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
3620 		if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
3621 			/* RLC autoload sequence 1: Program rlc ram */
3622 			if (adev->gfx.imu.funcs->program_rlc_ram)
3623 				adev->gfx.imu.funcs->program_rlc_ram(adev);
3624 		}
3625 		/* rlc autoload firmware */
3626 		r = gfx_v12_0_rlc_backdoor_autoload_enable(adev);
3627 		if (r)
3628 			return r;
3629 	} else {
3630 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3631 			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
3632 				if (adev->gfx.imu.funcs->load_microcode)
3633 					adev->gfx.imu.funcs->load_microcode(adev);
3634 				if (adev->gfx.imu.funcs->setup_imu)
3635 					adev->gfx.imu.funcs->setup_imu(adev);
3636 				if (adev->gfx.imu.funcs->start_imu)
3637 					adev->gfx.imu.funcs->start_imu(adev);
3638 			}
3639 
3640 			/* disable gpa mode in backdoor loading */
3641 			gfx_v12_0_disable_gpa_mode(adev);
3642 		}
3643 	}
3644 
3645 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
3646 	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
3647 		r = gfx_v12_0_wait_for_rlc_autoload_complete(adev);
3648 		if (r) {
3649 			dev_err(adev->dev, "(%d) failed to wait for rlc autoload completion\n", r);
3650 			return r;
3651 		}
3652 	}
3653 
3654 	if (!amdgpu_emu_mode)
3655 		gfx_v12_0_init_golden_registers(adev);
3656 
3657 	adev->gfx.is_poweron = true;
3658 
3659 	if (get_gb_addr_config(adev))
3660 		drm_warn(adev_to_drm(adev), "Invalid gb_addr_config!\n");
3661 
3662 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
3663 		gfx_v12_0_config_gfx_rs64(adev);
3664 
3665 	r = gfx_v12_0_gfxhub_enable(adev);
3666 	if (r)
3667 		return r;
3668 
3669 	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT ||
3670 	     adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) &&
3671 	     (amdgpu_dpm == 1)) {
3672 		/*
3673 		 * For gfx 12, rlc firmware loading relies on the smu firmware
3674 		 * being loaded first, so in direct mode the smc ucode has to
3675 		 * be loaded here before the rlc.
3676 		 */
3677 		r = amdgpu_pm_load_smu_firmware(adev, NULL);
3678 		if (r)
3679 			return r;
3680 	}
3681 
3682 	gfx_v12_0_constants_init(adev);
3683 
3684 	if (adev->nbio.funcs->gc_doorbell_init)
3685 		adev->nbio.funcs->gc_doorbell_init(adev);
3686 
3687 	r = gfx_v12_0_rlc_resume(adev);
3688 	if (r)
3689 		return r;
3690 
3691 	/*
3692 	 * golden register init and rlc resume may override some registers,
3693 	 * so reconfigure them here
3694 	 */
3695 	gfx_v12_0_tcp_harvest(adev);
3696 
3697 	r = gfx_v12_0_cp_resume(adev);
3698 	if (r)
3699 		return r;
3700 
3701 	return r;
3702 }
3703 
3704 static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
3705 					      bool enable)
3706 {
3707 	unsigned int irq_type;
3708 	int m, p, r;
3709 
3710 	if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
3711 		for (m = 0; m < adev->gfx.me.num_me; m++) {
3712 			for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
3713 				irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
3714 				if (enable)
3715 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
3716 							   irq_type);
3717 				else
3718 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
3719 							   irq_type);
3720 				if (r)
3721 					return r;
3722 			}
3723 		}
3724 	}
3725 
3726 	if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
3727 		for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
3728 			for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
3729 				irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
3730 					+ (m * adev->gfx.mec.num_pipe_per_mec)
3731 					+ p;
3732 				if (enable)
3733 					r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
3734 							   irq_type);
3735 				else
3736 					r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
3737 							   irq_type);
3738 				if (r)
3739 					return r;
3740 			}
3741 		}
3742 	}
3743 
3744 	return 0;
3745 }
3746 
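/*
 * Teardown mirrors bring-up in reverse: the kernel queues are unmapped
 * through the KIQ/MES while the CP is still running, and only then are
 * the CP engines halted and the gfxhub GART disabled.
 */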
3747 static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
3748 {
3749 	struct amdgpu_device *adev = ip_block->adev;
3750 	uint32_t tmp;
3751 
3752 	cancel_delayed_work_sync(&adev->gfx.idle_work);
3753 
3754 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3755 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3756 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
3757 	gfx_v12_0_set_userq_eop_interrupts(adev, false);
3758 
3759 	if (!adev->no_hw_access) {
3760 		if (amdgpu_async_gfx_ring) {
3761 			if (amdgpu_gfx_disable_kgq(adev, 0))
3762 				DRM_ERROR("KGQ disable failed\n");
3763 		}
3764 
3765 		if (amdgpu_gfx_disable_kcq(adev, 0))
3766 			DRM_ERROR("KCQ disable failed\n");
3767 
3768 		amdgpu_mes_kiq_hw_fini(adev, 0);
3769 	}
3770 
3771 	if (amdgpu_sriov_vf(adev)) {
3772 		gfx_v12_0_cp_gfx_enable(adev, false);
3773 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
3774 		tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
3775 		tmp &= 0xffffff00;
3776 		WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3777 
3778 		return 0;
3779 	}
3780 	gfx_v12_0_cp_enable(adev, false);
3781 	gfx_v12_0_enable_gui_idle_interrupt(adev, false);
3782 
3783 	adev->gfxhub.funcs->gart_disable(adev);
3784 
3785 	adev->gfx.is_poweron = false;
3786 
3787 	return 0;
3788 }
3789 
3790 static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block)
3791 {
3792 	return gfx_v12_0_hw_fini(ip_block);
3793 }
3794 
3795 static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
3796 {
3797 	return gfx_v12_0_hw_init(ip_block);
3798 }
3799 
3800 static bool gfx_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
3801 {
3802 	struct amdgpu_device *adev = ip_block->adev;
3803 
3804 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
3805 				GRBM_STATUS, GUI_ACTIVE))
3806 		return false;
3807 	else
3808 		return true;
3809 }
3810 
3811 static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
3812 {
3813 	unsigned i;
3814 	u32 tmp;
3815 	struct amdgpu_device *adev = ip_block->adev;
3816 
3817 	for (i = 0; i < adev->usec_timeout; i++) {
3818 		/* read GRBM_STATUS */
3819 		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
3820 			GRBM_STATUS__GUI_ACTIVE_MASK;
3821 
3822 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3823 			return 0;
3824 		udelay(1);
3825 	}
3826 	return -ETIMEDOUT;
3827 }
3828 
3829 static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3830 {
3831 	uint64_t clock = 0;
3832 
3833 	if (adev->smuio.funcs &&
3834 	    adev->smuio.funcs->get_gpu_clock_counter)
3835 		clock = adev->smuio.funcs->get_gpu_clock_counter(adev);
3836 	else
3837 		dev_warn(adev->dev, "query gpu clock counter is not supported\n");
3838 
3839 	return clock;
3840 }
3841 
3842 static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
3843 {
3844 	struct amdgpu_device *adev = ip_block->adev;
3845 
3846 	switch (amdgpu_user_queue) {
3847 	case -1:
3848 	case 0:
3849 	default:
3850 		adev->gfx.disable_kq = false;
3851 		adev->gfx.disable_uq = true;
3852 		break;
3853 	case 1:
3854 		adev->gfx.disable_kq = false;
3855 		adev->gfx.disable_uq = false;
3856 		break;
3857 	case 2:
3858 		adev->gfx.disable_kq = true;
3859 		adev->gfx.disable_uq = false;
3860 		break;
3861 	}
3862 
3863 	adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
3864 
3865 	if (adev->gfx.disable_kq) {
3866 		adev->gfx.num_gfx_rings = 0;
3867 		adev->gfx.num_compute_rings = 0;
3868 	} else {
3869 		adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
3870 		adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
3871 						  AMDGPU_MAX_COMPUTE_RINGS);
3872 	}
3873 
3874 	gfx_v12_0_set_kiq_pm4_funcs(adev);
3875 	gfx_v12_0_set_ring_funcs(adev);
3876 	gfx_v12_0_set_irq_funcs(adev);
3877 	gfx_v12_0_set_rlc_funcs(adev);
3878 	gfx_v12_0_set_mqd_funcs(adev);
3879 	gfx_v12_0_set_imu_funcs(adev);
3880 
3881 	gfx_v12_0_init_rlcg_reg_access_ctrl(adev);
3882 
3883 	return gfx_v12_0_init_microcode(adev);
3884 }
3885 
3886 static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
3887 {
3888 	struct amdgpu_device *adev = ip_block->adev;
3889 	int r;
3890 
3891 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3892 	if (r)
3893 		return r;
3894 
3895 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3896 	if (r)
3897 		return r;
3898 
3899 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
3900 	if (r)
3901 		return r;
3902 
3903 	r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
3904 	if (r)
3905 		return r;
3906 
3907 	return 0;
3908 }
3909 
3910 static bool gfx_v12_0_is_rlc_enabled(struct amdgpu_device *adev)
3911 {
3912 	uint32_t rlc_cntl;
3913 
3914 	/* report whether the RLC is enabled */
3915 	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
3916 	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
3917 }
3918 
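/*
 * RLC safe mode is a handshake with the RLC firmware: the driver writes
 * CMD plus a MESSAGE to RLC_SAFE_MODE, then polls until the RLC
 * acknowledges entry by clearing the CMD field.
 */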
3919 static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev,
3920 				    int xcc_id)
3921 {
3922 	uint32_t data;
3923 	unsigned i;
3924 
3925 	data = RLC_SAFE_MODE__CMD_MASK;
3926 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3927 
3928 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
3929 
3930 	/* wait for RLC_SAFE_MODE */
3931 	for (i = 0; i < adev->usec_timeout; i++) {
3932 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
3933 				   RLC_SAFE_MODE, CMD))
3934 			break;
3935 		udelay(1);
3936 	}
3937 }
3938 
3939 static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev,
3940 				      int xcc_id)
3941 {
3942 	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
3943 }
3944 
3945 static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev,
3946 				      bool enable)
3947 {
3948 	uint32_t def, data;
3949 
3950 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
3951 		return;
3952 
3953 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
3954 
3955 	if (enable)
3956 		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3957 	else
3958 		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
3959 
3960 	if (def != data)
3961 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
3962 }
3963 
3964 static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev,
3965 				      int xcc_id,
3966 				      struct amdgpu_ring *ring,
3967 				      unsigned vmid)
3968 {
3969 	u32 reg, data;
3970 
3971 	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
3972 	if (amdgpu_sriov_is_pp_one_vf(adev))
3973 		data = RREG32_NO_KIQ(reg);
3974 	else
3975 		data = RREG32(reg);
3976 
3977 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
3978 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
3979 
3980 	if (amdgpu_sriov_is_pp_one_vf(adev))
3981 		WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
3982 	else
3983 		WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
3984 
3985 	if (ring
3986 	    && amdgpu_sriov_is_pp_one_vf(adev)
3987 	    && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX)
3988 		|| (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
3989 		uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
3990 		amdgpu_ring_emit_wreg(ring, reg, data);
3991 	}
3992 }
3993 
3994 static const struct amdgpu_rlc_funcs gfx_v12_0_rlc_funcs = {
3995 	.is_rlc_enabled = gfx_v12_0_is_rlc_enabled,
3996 	.set_safe_mode = gfx_v12_0_set_safe_mode,
3997 	.unset_safe_mode = gfx_v12_0_unset_safe_mode,
3998 	.init = gfx_v12_0_rlc_init,
3999 	.get_csb_size = gfx_v12_0_get_csb_size,
4000 	.get_csb_buffer = gfx_v12_0_get_csb_buffer,
4001 	.resume = gfx_v12_0_rlc_resume,
4002 	.stop = gfx_v12_0_rlc_stop,
4003 	.reset = gfx_v12_0_rlc_reset,
4004 	.start = gfx_v12_0_rlc_start,
4005 	.update_spm_vmid = gfx_v12_0_update_spm_vmid,
4006 };
4007 
4008 #if 0
4009 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable)
4010 {
4011 	/* TODO */
4012 }
4013 
4014 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
4015 {
4016 	/* TODO */
4017 }
4018 #endif
4019 
4020 static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
4021 					   enum amd_powergating_state state)
4022 {
4023 	struct amdgpu_device *adev = ip_block->adev;
4024 	bool enable = (state == AMD_PG_STATE_GATE);
4025 
4026 	if (amdgpu_sriov_vf(adev))
4027 		return 0;
4028 
4029 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4030 	case IP_VERSION(12, 0, 0):
4031 	case IP_VERSION(12, 0, 1):
4032 		amdgpu_gfx_off_ctrl(adev, enable);
4033 		break;
4034 	default:
4035 		break;
4036 	}
4037 
4038 	return 0;
4039 }
4040 
4041 static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4042 						       bool enable)
4043 {
4044 	uint32_t def, data;
4045 
4046 	if (!(adev->cg_flags &
4047 	      (AMD_CG_SUPPORT_GFX_CGCG |
4048 	      AMD_CG_SUPPORT_GFX_CGLS |
4049 	      AMD_CG_SUPPORT_GFX_3D_CGCG |
4050 	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
4051 		return;
4052 
4053 	if (enable) {
4054 		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4055 
4056 		/* unset CGCG override */
4057 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4058 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4059 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4060 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4061 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4062 		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4063 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4064 
4065 		/* update CGCG override bits */
4066 		if (def != data)
4067 			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4068 
4069 		/* enable cgcg FSM(0x0000363F) */
4070 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4071 
4072 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4073 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4074 			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4075 				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4076 		}
4077 
4078 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4079 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4080 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4081 				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4082 		}
4083 
4084 		if (def != data)
4085 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4086 
4087 		/* Program RLC_CGCG_CGLS_CTRL_3D */
4088 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4089 
4090 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4091 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4092 			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4093 				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4094 		}
4095 
4096 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4097 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4098 			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4099 				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4100 		}
4101 
4102 		if (def != data)
4103 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4104 
4105 		/* set IDLE_POLL_COUNT(0x00900100) */
4106 		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4107 
4108 		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4109 		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4110 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4111 
4112 		if (def != data)
4113 			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
4114 
4115 		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4116 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4117 		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4118 		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4119 		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4120 		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4121 
4122 		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4123 		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4124 		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
4125 
4126 		/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4127 		if (adev->sdma.num_instances > 1) {
4128 			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4129 			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4130 			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4131 		}
4132 	} else {
4133 		/* Program RLC_CGCG_CGLS_CTRL */
4134 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4135 
4136 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4137 			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4138 
4139 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4140 			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4141 
4142 		if (def != data)
4143 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4144 
4145 		/* Program RLC_CGCG_CGLS_CTRL_3D */
4146 		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4147 
4148 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4149 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4150 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4151 			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4152 
4153 		if (def != data)
4154 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4155 	}
4156 }
4157 
4158 static void gfx_v12_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4159 						       bool enable)
4160 {
4161 	uint32_t data, def;
4162 	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4163 		return;
4164 
4165 	/* It is disabled by HW by default */
4166 	if (enable) {
4167 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4168 			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4169 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4170 
4171 			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4172 				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4173 				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4174 
4175 			if (def != data)
4176 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4177 		}
4178 	} else {
4179 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4180 			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4181 
4182 			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4183 				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4184 				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
4185 
4186 			if (def != data)
4187 				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4188 		}
4189 	}
4190 }
4191 
4192 static void gfx_v12_0_update_repeater_fgcg(struct amdgpu_device *adev,
4193 					   bool enable)
4194 {
4195 	uint32_t def, data;
4196 
4197 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4198 		return;
4199 
4200 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4201 
4202 	if (enable)
4203 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
4204 				  RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK);
4205 	else
4206 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK |
4207 				RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK;
4208 
4209 	if (def != data)
4210 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4211 }
4212 
4213 static void gfx_v12_0_update_sram_fgcg(struct amdgpu_device *adev,
4214 				       bool enable)
4215 {
4216 	uint32_t def, data;
4217 
4218 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4219 		return;
4220 
4221 	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4222 
4223 	if (enable)
4224 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4225 	else
4226 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
4227 
4228 	if (def != data)
4229 		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4230 }
4231 
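/*
 * All clock-gating reprogramming is bracketed by RLC safe mode so that
 * the RLC firmware is quiesced while the override and control registers
 * are rewritten.
 */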
4232 static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4233 					    bool enable)
4234 {
4235 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4236 
4237 	gfx_v12_0_update_coarse_grain_clock_gating(adev, enable);
4238 
4239 	gfx_v12_0_update_medium_grain_clock_gating(adev, enable);
4240 
4241 	gfx_v12_0_update_repeater_fgcg(adev, enable);
4242 
4243 	gfx_v12_0_update_sram_fgcg(adev, enable);
4244 
4245 	gfx_v12_0_update_perf_clk(adev, enable);
4246 
4247 	if (adev->cg_flags &
4248 	    (AMD_CG_SUPPORT_GFX_MGCG |
4249 	     AMD_CG_SUPPORT_GFX_CGLS |
4250 	     AMD_CG_SUPPORT_GFX_CGCG |
4251 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
4252 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
4253 		gfx_v12_0_enable_gui_idle_interrupt(adev, enable);
4254 
4255 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4256 
4257 	return 0;
4258 }
4259 
4260 static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4261 					   enum amd_clockgating_state state)
4262 {
4263 	struct amdgpu_device *adev = ip_block->adev;
4264 
4265 	if (amdgpu_sriov_vf(adev))
4266 		return 0;
4267 
4268 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4269 	case IP_VERSION(12, 0, 0):
4270 	case IP_VERSION(12, 0, 1):
4271 		gfx_v12_0_update_gfx_clock_gating(adev,
4272 						  state == AMD_CG_STATE_GATE);
4273 		break;
4274 	default:
4275 		break;
4276 	}
4277 
4278 	return 0;
4279 }
4280 
4281 static void gfx_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
4282 {
4283 	struct amdgpu_device *adev = ip_block->adev;
4284 	int data;
4285 
4286 	/* AMD_CG_SUPPORT_GFX_MGCG */
4287 	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4288 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4289 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
4290 
4291 	/* AMD_CG_SUPPORT_REPEATER_FGCG */
4292 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
4293 		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
4294 
4295 	/* AMD_CG_SUPPORT_GFX_FGCG */
4296 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
4297 		*flags |= AMD_CG_SUPPORT_GFX_FGCG;
4298 
4299 	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
4300 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
4301 		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
4302 
4303 	/* AMD_CG_SUPPORT_GFX_CGCG */
4304 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4305 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4306 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
4307 
4308 	/* AMD_CG_SUPPORT_GFX_CGLS */
4309 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4310 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
4311 
4312 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
4313 	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4314 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4315 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4316 
4317 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
4318 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4319 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4320 }
4321 
4322 static u64 gfx_v12_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4323 {
4324 	/* the gfx12 hardware uses a 32-bit rptr */
4325 	return *(uint32_t *)ring->rptr_cpu_addr;
4326 }
4327 
4328 static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4329 {
4330 	struct amdgpu_device *adev = ring->adev;
4331 	u64 wptr;
4332 
4333 	/* XXX check if swapping is necessary on BE */
4334 	if (ring->use_doorbell) {
4335 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
4336 	} else {
4337 		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
4338 		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
4339 	}
4340 
4341 	return wptr;
4342 }
4343 
4344 static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4345 {
4346 	struct amdgpu_device *adev = ring->adev;
4347 
4348 	if (ring->use_doorbell) {
4349 		/* XXX check if swapping is necessary on BE */
4350 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
4351 			     ring->wptr);
4352 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4353 	} else {
4354 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
4355 			     lower_32_bits(ring->wptr));
4356 		WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
4357 			     upper_32_bits(ring->wptr));
4358 	}
4359 }
4360 
4361 static u64 gfx_v12_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4362 {
4363 	/* the gfx12 hardware uses a 32-bit rptr */
4364 	return *(uint32_t *)ring->rptr_cpu_addr;
4365 }
4366 
4367 static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4368 {
4369 	u64 wptr;
4370 
4371 	/* XXX check if swapping is necessary on BE */
4372 	if (ring->use_doorbell)
4373 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
4374 	else
4375 		BUG();
4376 	return wptr;
4377 }
4378 
4379 static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4380 {
4381 	struct amdgpu_device *adev = ring->adev;
4382 
4383 	/* XXX check if swapping is necessary on BE */
4384 	if (ring->use_doorbell) {
4385 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
4386 			     ring->wptr);
4387 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4388 	} else {
4389 		BUG(); /* only DOORBELL method supported on gfx12 now */
4390 	}
4391 }
4392 
4393 static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4394 {
4395 	struct amdgpu_device *adev = ring->adev;
4396 	u32 ref_and_mask, reg_mem_engine;
4397 
4398 	if (!adev->gfx.funcs->get_hdp_flush_mask) {
4399 		dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
4400 		return;
4401 	}
4402 
4403 	adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
4404 	gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4405 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4406 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4407 			       ref_and_mask, ref_and_mask, 0x20);
4408 }
4409 
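/*
 * IBs are submitted with a PACKET3_INDIRECT_BUFFER packet: the header,
 * the lower/upper halves of the dword-aligned IB address, and a control
 * word packing the IB length in dwords with the VMID in bits 31:24.
 */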
4410 static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4411 				       struct amdgpu_job *job,
4412 				       struct amdgpu_ib *ib,
4413 				       uint32_t flags)
4414 {
4415 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4416 	u32 header, control = 0;
4417 
4418 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4419 
4420 	control |= ib->length_dw | (vmid << 24);
4421 
4422 	amdgpu_ring_write(ring, header);
4423 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4424 	amdgpu_ring_write(ring,
4425 #ifdef __BIG_ENDIAN
4426 		(2 << 0) |
4427 #endif
4428 		lower_32_bits(ib->gpu_addr));
4429 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4430 	amdgpu_ring_write(ring, control);
4431 }
4432 
4433 static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4434 					   struct amdgpu_job *job,
4435 					   struct amdgpu_ib *ib,
4436 					   uint32_t flags)
4437 {
4438 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4439 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4440 
4441 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4442 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4443 	amdgpu_ring_write(ring,
4444 #ifdef __BIG_ENDIAN
4445 				(2 << 0) |
4446 #endif
4447 				lower_32_bits(ib->gpu_addr));
4448 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4449 	amdgpu_ring_write(ring, control);
4450 }
4451 
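/*
 * Fences use RELEASE_MEM, an end-of-pipe event that writes back the GL2
 * cache and then writes the sequence number (optionally raising an
 * interrupt) once the preceding work has drained.
 */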
4452 static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4453 				     u64 seq, unsigned flags)
4454 {
4455 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4456 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4457 
4458 	/* RELEASE_MEM - flush caches, send int */
4459 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4460 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4461 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4462 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4463 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4464 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
4465 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4466 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4467 
4468 	/*
4469 	 * the address should be qword aligned for a 64-bit write, and dword
4470 	 * aligned if only the low 32 data bits are sent (data high is discarded)
4471 	 */
4472 	if (write64bit)
4473 		BUG_ON(addr & 0x7);
4474 	else
4475 		BUG_ON(addr & 0x3);
4476 	amdgpu_ring_write(ring, lower_32_bits(addr));
4477 	amdgpu_ring_write(ring, upper_32_bits(addr));
4478 	amdgpu_ring_write(ring, lower_32_bits(seq));
4479 	amdgpu_ring_write(ring, upper_32_bits(seq));
4480 	amdgpu_ring_write(ring, 0);
4481 }
4482 
4483 static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4484 {
4485 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4486 	uint32_t seq = ring->fence_drv.sync_seq;
4487 	uint64_t addr = ring->fence_drv.gpu_addr;
4488 
4489 	gfx_v12_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4490 			       upper_32_bits(addr), seq, 0xffffffff, 4);
4491 }
4492 
4493 static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
4494 				   uint16_t pasid, uint32_t flush_type,
4495 				   bool all_hub, uint8_t dst_sel)
4496 {
4497 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
4498 	amdgpu_ring_write(ring,
4499 			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
4500 			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
4501 			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
4502 			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
4503 }
4504 
4505 static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4506 					 unsigned vmid, uint64_t pd_addr)
4507 {
4508 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4509 
4510 	/* compute doesn't have PFP */
4511 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4512 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4513 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4514 		amdgpu_ring_write(ring, 0x0);
4515 	}
4516 }
4517 
4518 static void gfx_v12_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4519 					  u64 seq, unsigned int flags)
4520 {
4521 	struct amdgpu_device *adev = ring->adev;
4522 
4523 	/* we only allocate 32bit for each seq wb address */
4524 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4525 
4526 	/* write fence seq to the "addr" */
4527 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4528 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4529 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4530 	amdgpu_ring_write(ring, lower_32_bits(addr));
4531 	amdgpu_ring_write(ring, upper_32_bits(addr));
4532 	amdgpu_ring_write(ring, lower_32_bits(seq));
4533 
4534 	if (flags & AMDGPU_FENCE_FLAG_INT) {
4535 		/* set register to trigger INT */
4536 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4537 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4538 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4539 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
4540 		amdgpu_ring_write(ring, 0);
4541 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4542 	}
4543 }
4544 
4545 static void gfx_v12_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
4546 					 uint32_t flags)
4547 {
4548 	uint32_t dw2 = 0;
4549 
4550 	dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
4551 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4552 		/* set load_global_config & load_global_uconfig */
4553 		dw2 |= 0x8001;
4554 		/* set load_cs_sh_regs */
4555 		dw2 |= 0x01000000;
4556 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
4557 		dw2 |= 0x10002;
4558 	}
4559 
4560 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4561 	amdgpu_ring_write(ring, dw2);
4562 	amdgpu_ring_write(ring, 0);
4563 }
4564 
4565 static unsigned gfx_v12_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
4566 						   uint64_t addr)
4567 {
4568 	unsigned ret;
4569 
4570 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4571 	amdgpu_ring_write(ring, lower_32_bits(addr));
4572 	amdgpu_ring_write(ring, upper_32_bits(addr));
4573 	/* discard following DWs if *cond_exec_gpu_addr==0 */
4574 	amdgpu_ring_write(ring, 0);
4575 	ret = ring->wptr & ring->buf_mask;
4576 	/* patch dummy value later */
4577 	amdgpu_ring_write(ring, 0);
4578 
4579 	return ret;
4580 }
4581 
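/*
 * Mid-IB preemption: clear the ring's cond_exec flag, ask the KIQ to
 * preempt the queue without unmapping it, then poll the trailing fence
 * to confirm the preemption actually completed.
 */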
4582 static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
4583 {
4584 	int i, r = 0;
4585 	struct amdgpu_device *adev = ring->adev;
4586 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4587 	struct amdgpu_ring *kiq_ring = &kiq->ring;
4588 	unsigned long flags;
4589 
4590 	if (adev->enable_mes)
4591 		return -EINVAL;
4592 
4593 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4594 		return -EINVAL;
4595 
4596 	spin_lock_irqsave(&kiq->ring_lock, flags);
4597 
4598 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
4599 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
4600 		return -ENOMEM;
4601 	}
4602 
4603 	/* assert preemption condition */
4604 	amdgpu_ring_set_preempt_cond_exec(ring, false);
4605 
4606 	/* assert IB preemption, emit the trailing fence */
4607 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4608 				   ring->trail_fence_gpu_addr,
4609 				   ++ring->trail_seq);
4610 	amdgpu_ring_commit(kiq_ring);
4611 
4612 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4613 
4614 	/* poll the trailing fence */
4615 	for (i = 0; i < adev->usec_timeout; i++) {
4616 		if (ring->trail_seq ==
4617 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4618 			break;
4619 		udelay(1);
4620 	}
4621 
4622 	if (i >= adev->usec_timeout) {
4623 		r = -EINVAL;
4624 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4625 	}
4626 
4627 	/* deassert preemption condition */
4628 	amdgpu_ring_set_preempt_cond_exec(ring, true);
4629 	return r;
4630 }
4631 
4632 static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
4633 					   bool start,
4634 					   bool secure)
4635 {
4636 	uint32_t v = secure ? FRAME_TMZ : 0;
4637 
4638 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4639 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
4640 }
4641 
4642 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
4643 				     uint32_t reg_val_offs)
4644 {
4645 	struct amdgpu_device *adev = ring->adev;
4646 
4647 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4648 	amdgpu_ring_write(ring, 0 |	/* src: register*/
4649 				(5 << 8) |	/* dst: memory */
4650 				(1 << 20));	/* write confirm */
4651 	amdgpu_ring_write(ring, reg);
4652 	amdgpu_ring_write(ring, 0);
4653 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4654 				reg_val_offs * 4));
4655 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4656 				reg_val_offs * 4));
4657 }
4658 
4659 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring,
4660 				     uint32_t reg,
4661 				     uint32_t val)
4662 {
4663 	uint32_t cmd = 0;
4664 
4665 	switch (ring->funcs->type) {
4666 	case AMDGPU_RING_TYPE_GFX:
4667 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4668 		break;
4669 	case AMDGPU_RING_TYPE_KIQ:
4670 		cmd = (1 << 16); /* no inc addr */
4671 		break;
4672 	default:
4673 		cmd = WR_CONFIRM;
4674 		break;
4675 	}
4676 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4677 	amdgpu_ring_write(ring, cmd);
4678 	amdgpu_ring_write(ring, reg);
4679 	amdgpu_ring_write(ring, 0);
4680 	amdgpu_ring_write(ring, val);
4681 }
4682 
4683 static void gfx_v12_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4684 					uint32_t val, uint32_t mask)
4685 {
4686 	gfx_v12_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4687 }
4688 
4689 static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4690 						   uint32_t reg0, uint32_t reg1,
4691 						   uint32_t ref, uint32_t mask)
4692 {
4693 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4694 
4695 	gfx_v12_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4696 			       ref, mask, 0x20);
4697 }
4698 
4699 static void
4700 gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4701 				      uint32_t me, uint32_t pipe,
4702 				      enum amdgpu_interrupt_state state)
4703 {
4704 	uint32_t cp_int_cntl, cp_int_cntl_reg;
4705 
4706 	if (!me) {
4707 		switch (pipe) {
4708 		case 0:
4709 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
4710 			break;
4711 		default:
4712 			DRM_DEBUG("invalid pipe %d\n", pipe);
4713 			return;
4714 		}
4715 	} else {
4716 		DRM_DEBUG("invalid me %d\n", me);
4717 		return;
4718 	}
4719 
4720 	switch (state) {
4721 	case AMDGPU_IRQ_STATE_DISABLE:
4722 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4723 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4724 					    TIME_STAMP_INT_ENABLE, 0);
4725 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4726 					    GENERIC0_INT_ENABLE, 0);
4727 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4728 		break;
4729 	case AMDGPU_IRQ_STATE_ENABLE:
4730 		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4731 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4732 					    TIME_STAMP_INT_ENABLE, 1);
4733 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4734 					    GENERIC0_INT_ENABLE, 1);
4735 		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4736 		break;
4737 	default:
4738 		break;
4739 	}
4740 }
4741 
4742 static void gfx_v12_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4743 						     int me, int pipe,
4744 						     enum amdgpu_interrupt_state state)
4745 {
4746 	u32 mec_int_cntl, mec_int_cntl_reg;
4747 
4748 	/*
4749 	 * amdgpu controls only the first MEC. That's why this function only
4750 	 * handles the setting of interrupts for this specific MEC. All other
4751 	 * pipes' interrupts are set by amdkfd.
4752 	 */
4753 
4754 	if (me == 1) {
4755 		switch (pipe) {
4756 		case 0:
4757 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
4758 			break;
4759 		case 1:
4760 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
4761 			break;
4762 		default:
4763 			DRM_DEBUG("invalid pipe %d\n", pipe);
4764 			return;
4765 		}
4766 	} else {
4767 		DRM_DEBUG("invalid me %d\n", me);
4768 		return;
4769 	}
4770 
4771 	switch (state) {
4772 	case AMDGPU_IRQ_STATE_DISABLE:
4773 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
4774 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4775 					     TIME_STAMP_INT_ENABLE, 0);
4776 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4777 					     GENERIC0_INT_ENABLE, 0);
4778 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
4779 		break;
4780 	case AMDGPU_IRQ_STATE_ENABLE:
4781 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
4782 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4783 					     TIME_STAMP_INT_ENABLE, 1);
4784 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4785 					     GENERIC0_INT_ENABLE, 1);
4786 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
4787 		break;
4788 	default:
4789 		break;
4790 	}
4791 }
4792 
4793 static int gfx_v12_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4794 					    struct amdgpu_irq_src *src,
4795 					    unsigned type,
4796 					    enum amdgpu_interrupt_state state)
4797 {
4798 	switch (type) {
4799 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4800 		gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
4801 		break;
4802 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
4803 		gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
4804 		break;
4805 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4806 		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4807 		break;
4808 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4809 		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4810 		break;
4811 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4812 		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4813 		break;
4814 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4815 		gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4816 		break;
4817 	default:
4818 		break;
4819 	}
4820 	return 0;
4821 }
4822 
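/*
 * EOP interrupt handler. With MES enabled, a non-zero doorbell offset in
 * src_data identifies the user-queue fence driver to run. Otherwise the
 * IV ring_id encodes me (bits 3:2), pipe (bits 1:0) and queue (bits 6:4),
 * which selects the kernel ring whose fences are processed.
 */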
4823 static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
4824 			     struct amdgpu_irq_src *source,
4825 			     struct amdgpu_iv_entry *entry)
4826 {
4827 	u32 doorbell_offset = entry->src_data[0];
4828 	u8 me_id, pipe_id, queue_id;
4829 	struct amdgpu_ring *ring;
4830 	int i;
4831 
4832 	DRM_DEBUG("IH: CP EOP\n");
4833 
4834 	if (adev->enable_mes && doorbell_offset) {
4835 		struct amdgpu_userq_fence_driver *fence_drv = NULL;
4836 		struct xarray *xa = &adev->userq_xa;
4837 		unsigned long flags;
4838 
4839 		xa_lock_irqsave(xa, flags);
4840 		fence_drv = xa_load(xa, doorbell_offset);
4841 		if (fence_drv)
4842 			amdgpu_userq_fence_driver_process(fence_drv);
4843 		xa_unlock_irqrestore(xa, flags);
4844 	} else {
4845 		me_id = (entry->ring_id & 0x0c) >> 2;
4846 		pipe_id = (entry->ring_id & 0x03) >> 0;
4847 		queue_id = (entry->ring_id & 0x70) >> 4;
4848 
4849 		switch (me_id) {
4850 		case 0:
4851 			if (pipe_id == 0)
4852 				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4853 			else
4854 				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
4855 			break;
4856 		case 1:
4857 		case 2:
4858 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4859 				ring = &adev->gfx.compute_ring[i];
4860 				/* Per-queue interrupt is supported for MEC starting from VI.
4861 				 * The interrupt can only be enabled/disabled per pipe instead
4862 				 * of per queue.
4863 				 */
4864 				if ((ring->me == me_id) &&
4865 				    (ring->pipe == pipe_id) &&
4866 				    (ring->queue == queue_id))
4867 					amdgpu_fence_process(ring);
4868 			}
4869 			break;
4870 		}
4871 	}
4872 
4873 	return 0;
4874 }
4875 
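/*
 * Toggle the privileged-register fault interrupt on every CPG (ME) pipe
 * and every CPC (MEC) pipe; MEC interrupt controls start at ME1.
 */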
4876 static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4877 					      struct amdgpu_irq_src *source,
4878 					      unsigned int type,
4879 					      enum amdgpu_interrupt_state state)
4880 {
4881 	u32 cp_int_cntl_reg, cp_int_cntl;
4882 	int i, j;
4883 
4884 	switch (state) {
4885 	case AMDGPU_IRQ_STATE_DISABLE:
4886 	case AMDGPU_IRQ_STATE_ENABLE:
4887 		for (i = 0; i < adev->gfx.me.num_me; i++) {
4888 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
4889 				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
4890 
4891 				if (cp_int_cntl_reg) {
4892 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4893 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4894 								    PRIV_REG_INT_ENABLE,
4895 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4896 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4897 				}
4898 			}
4899 		}
4900 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4901 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4902 				/* MECs start at 1 */
4903 				cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
4904 
4905 				if (cp_int_cntl_reg) {
4906 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4907 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4908 								    PRIV_REG_INT_ENABLE,
4909 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4910 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4911 				}
4912 			}
4913 		}
4914 		break;
4915 	default:
4916 		break;
4917 	}
4918 
4919 	return 0;
4920 }
4921 
4922 static int gfx_v12_0_set_bad_op_fault_state(struct amdgpu_device *adev,
4923 					    struct amdgpu_irq_src *source,
4924 					    unsigned type,
4925 					    enum amdgpu_interrupt_state state)
4926 {
4927 	u32 cp_int_cntl_reg, cp_int_cntl;
4928 	int i, j;
4929 
4930 	switch (state) {
4931 	case AMDGPU_IRQ_STATE_DISABLE:
4932 	case AMDGPU_IRQ_STATE_ENABLE:
4933 		for (i = 0; i < adev->gfx.me.num_me; i++) {
4934 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
4935 				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
4936 
4937 				if (cp_int_cntl_reg) {
4938 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4939 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4940 								    OPCODE_ERROR_INT_ENABLE,
4941 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4942 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4943 				}
4944 			}
4945 		}
4946 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4947 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4948 				/* MECs start at 1 */
4949 				cp_int_cntl_reg = gfx_v12_0_get_cpc_int_cntl(adev, i + 1, j);
4950 
4951 				if (cp_int_cntl_reg) {
4952 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4953 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4954 								    OPCODE_ERROR_INT_ENABLE,
4955 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4956 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4957 				}
4958 			}
4959 		}
4960 		break;
4961 	default:
4962 		break;
4963 	}
4964 	return 0;
4965 }
4966 
4967 static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4968 					       struct amdgpu_irq_src *source,
4969 					       unsigned int type,
4970 					       enum amdgpu_interrupt_state state)
4971 {
4972 	u32 cp_int_cntl_reg, cp_int_cntl;
4973 	int i, j;
4974 
4975 	switch (state) {
4976 	case AMDGPU_IRQ_STATE_DISABLE:
4977 	case AMDGPU_IRQ_STATE_ENABLE:
4978 		for (i = 0; i < adev->gfx.me.num_me; i++) {
4979 			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
4980 				cp_int_cntl_reg = gfx_v12_0_get_cpg_int_cntl(adev, i, j);
4981 
4982 				if (cp_int_cntl_reg) {
4983 					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
4984 					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4985 								    PRIV_INSTR_INT_ENABLE,
4986 								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4987 					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
4988 				}
4989 			}
4990 		}
4991 		break;
4992 	default:
4993 		break;
4994 	}
4995 
4996 	return 0;
4997 }
4998 
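/*
 * Report a CP fault to the DRM scheduler of the kernel ring matching the
 * me/pipe/queue decoded from the IV ring_id, so the offending job can be
 * handled by the scheduler's timeout/recovery path.
 */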
4999 static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
5000 					struct amdgpu_iv_entry *entry)
5001 {
5002 	u8 me_id, pipe_id, queue_id;
5003 	struct amdgpu_ring *ring;
5004 	int i;
5005 
5006 	me_id = (entry->ring_id & 0x0c) >> 2;
5007 	pipe_id = (entry->ring_id & 0x03) >> 0;
5008 	queue_id = (entry->ring_id & 0x70) >> 4;
5009 
5010 	if (!adev->gfx.disable_kq) {
5011 		switch (me_id) {
5012 		case 0:
5013 			for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5014 				ring = &adev->gfx.gfx_ring[i];
5015 				if (ring->me == me_id && ring->pipe == pipe_id &&
5016 				    ring->queue == queue_id)
5017 					drm_sched_fault(&ring->sched);
5018 			}
5019 			break;
5020 		case 1:
5021 		case 2:
5022 			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5023 				ring = &adev->gfx.compute_ring[i];
5024 				if (ring->me == me_id && ring->pipe == pipe_id &&
5025 				    ring->queue == queue_id)
5026 					drm_sched_fault(&ring->sched);
5027 			}
5028 			break;
5029 		default:
5030 			BUG();
5031 			break;
5032 		}
5033 	}
5034 }
5035 
5036 static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev,
5037 				  struct amdgpu_irq_src *source,
5038 				  struct amdgpu_iv_entry *entry)
5039 {
5040 	DRM_ERROR("Illegal register access in command stream\n");
5041 	gfx_v12_0_handle_priv_fault(adev, entry);
5042 	return 0;
5043 }
5044 
5045 static int gfx_v12_0_bad_op_irq(struct amdgpu_device *adev,
5046 				struct amdgpu_irq_src *source,
5047 				struct amdgpu_iv_entry *entry)
5048 {
5049 	DRM_ERROR("Illegal opcode in command stream\n");
5050 	gfx_v12_0_handle_priv_fault(adev, entry);
5051 	return 0;
5052 }
5053 
5054 static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev,
5055 				   struct amdgpu_irq_src *source,
5056 				   struct amdgpu_iv_entry *entry)
5057 {
5058 	DRM_ERROR("Illegal instruction in command stream\n");
5059 	gfx_v12_0_handle_priv_fault(adev, entry);
5060 	return 0;
5061 }
5062 
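/*
 * Full GPU cache flush: the GCR_CNTL bits below write back and invalidate
 * GL2 and its metadata cache (GLM) and invalidate the GL1, vector (GLV),
 * scalar (GLK) and instruction (GLI) caches over the entire address range.
 */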
5063 static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
5064 {
5065 	const unsigned int gcr_cntl =
5066 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
5067 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
5068 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
5069 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
5070 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
5071 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
5072 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
5073 			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
5074 
5075 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
5076 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
5077 	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
5078 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
5079 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
5080 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
5081 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
5082 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
5083 	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
5084 }
5085 
5086 static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
5087 {
5088 	/* Header itself is a NOP packet */
5089 	if (num_nop == 1) {
5090 		amdgpu_ring_write(ring, ring->funcs->nop);
5091 		return;
5092 	}
5093 
5094 	/* Max HW optimization up to 0x3ffe; any remainder is padded one NOP at a time */
5095 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
5096 
5097 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
5098 	amdgpu_ring_insert_nop(ring, num_nop - 1);
5099 }
5100 
5101 static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
5102 {
5103 	/* Emit the cleaner shader */
5104 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
5105 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
5106 }
5107 
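/*
 * Print the register state captured by gfx_v12_ip_dump(): core GC
 * registers first, then the per-queue lists for every compute and gfx
 * queue instance.
 */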
5108 static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
5109 {
5110 	struct amdgpu_device *adev = ip_block->adev;
5111 	uint32_t i, j, k, reg, index = 0;
5112 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
5113 
5114 	if (!adev->gfx.ip_dump_core)
5115 		return;
5116 
5117 	for (i = 0; i < reg_count; i++)
5118 		drm_printf(p, "%-50s \t 0x%08x\n",
5119 			   gc_reg_list_12_0[i].reg_name,
5120 			   adev->gfx.ip_dump_core[i]);
5121 
5122 	/* print compute queue registers for all instances */
5123 	if (!adev->gfx.ip_dump_compute_queues)
5124 		return;
5125 
5126 	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
5127 	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
5128 		   adev->gfx.mec.num_mec,
5129 		   adev->gfx.mec.num_pipe_per_mec,
5130 		   adev->gfx.mec.num_queue_per_pipe);
5131 
5132 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
5133 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
5134 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
5135 				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
5136 				for (reg = 0; reg < reg_count; reg++) {
5137 					drm_printf(p, "%-50s \t 0x%08x\n",
5138 						   gc_cp_reg_list_12[reg].reg_name,
5139 						   adev->gfx.ip_dump_compute_queues[index + reg]);
5140 				}
5141 				index += reg_count;
5142 			}
5143 		}
5144 	}
5145 
5146 	/* print gfx queue registers for all instances */
5147 	if (!adev->gfx.ip_dump_gfx_queues)
5148 		return;
5149 
5150 	index = 0;
5151 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
5152 	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
5153 		   adev->gfx.me.num_me,
5154 		   adev->gfx.me.num_pipe_per_me,
5155 		   adev->gfx.me.num_queue_per_pipe);
5156 
5157 	for (i = 0; i < adev->gfx.me.num_me; i++) {
5158 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
5159 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
5160 				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
5161 				for (reg = 0; reg < reg_count; reg++) {
5162 					drm_printf(p, "%-50s \t 0x%08x\n",
5163 						   gc_gfx_queue_reg_list_12[reg].reg_name,
5164 						   adev->gfx.ip_dump_gfx_queues[index + reg]);
5165 				}
5166 				index += reg_count;
5167 			}
5168 		}
5169 	}
5170 }
5171 
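/*
 * Capture GC register state for later printing via gfx_v12_ip_print().
 * GFXOFF is disabled around the reads, and srbm_mutex is held while each
 * queue is selected through soc24_grbm_select().
 */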
5172 static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
5173 {
5174 	struct amdgpu_device *adev = ip_block->adev;
5175 	uint32_t i, j, k, reg, index = 0;
5176 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
5177 
5178 	if (!adev->gfx.ip_dump_core)
5179 		return;
5180 
5181 	amdgpu_gfx_off_ctrl(adev, false);
5182 	for (i = 0; i < reg_count; i++)
5183 		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i]));
5184 	amdgpu_gfx_off_ctrl(adev, true);
5185 
5186 	/* dump compute queue registers for all instances */
5187 	if (!adev->gfx.ip_dump_compute_queues)
5188 		return;
5189 
5190 	reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
5191 	amdgpu_gfx_off_ctrl(adev, false);
5192 	mutex_lock(&adev->srbm_mutex);
5193 	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
5194 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
5195 			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
5196 				/* ME0 is for GFX so start from 1 for CP */
5197 				soc24_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
5198 				for (reg = 0; reg < reg_count; reg++) {
5199 					adev->gfx.ip_dump_compute_queues[index + reg] =
5200 						RREG32(SOC15_REG_ENTRY_OFFSET(
5201 							gc_cp_reg_list_12[reg]));
5202 				}
5203 				index += reg_count;
5204 			}
5205 		}
5206 	}
5207 	soc24_grbm_select(adev, 0, 0, 0, 0);
5208 	mutex_unlock(&adev->srbm_mutex);
5209 	amdgpu_gfx_off_ctrl(adev, true);
5210 
5211 	/* dump gfx queue registers for all instances */
5212 	if (!adev->gfx.ip_dump_gfx_queues)
5213 		return;
5214 
5215 	index = 0;
5216 	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_12);
5217 	amdgpu_gfx_off_ctrl(adev, false);
5218 	mutex_lock(&adev->srbm_mutex);
5219 	for (i = 0; i < adev->gfx.me.num_me; i++) {
5220 		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
5221 			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
5222 				soc24_grbm_select(adev, i, j, k, 0);
5223 
5224 				for (reg = 0; reg < reg_count; reg++) {
5225 					adev->gfx.ip_dump_gfx_queues[index + reg] =
5226 						RREG32(SOC15_REG_ENTRY_OFFSET(
5227 							gc_gfx_queue_reg_list_12[reg]));
5228 				}
5229 				index += reg_count;
5230 			}
5231 		}
5232 	}
5233 	soc24_grbm_select(adev, 0, 0, 0, 0);
5234 	mutex_unlock(&adev->srbm_mutex);
5235 	amdgpu_gfx_off_ctrl(adev, true);
5236 }
5237 
5238 static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
5239 {
5240 	/* Disable the pipe reset until the CPFW fully supports it. */
5241 	dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
5242 	return false;
5243 }
5244 
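/*
 * Reset a gfx pipe by pulsing the PFP/ME reset bits in CP_ME_CNTL for
 * the ring's pipe. The restarted instruction pointer is compared against
 * the RS64 firmware start address as a best-effort check only; the
 * subsequent ring test gives the definitive result.
 */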
5245 static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
5246 {
5247 	struct amdgpu_device *adev = ring->adev;
5248 	uint32_t reset_pipe = 0, clean_pipe = 0;
5249 	int r;
5250 
5251 	if (!gfx_v12_pipe_reset_support(adev))
5252 		return -EOPNOTSUPP;
5253 
5254 	gfx_v12_0_set_safe_mode(adev, 0);
5255 	mutex_lock(&adev->srbm_mutex);
5256 	soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5257 
5258 	switch (ring->pipe) {
5259 	case 0:
5260 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
5261 					   PFP_PIPE0_RESET, 1);
5262 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
5263 					   ME_PIPE0_RESET, 1);
5264 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
5265 					   PFP_PIPE0_RESET, 0);
5266 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
5267 					   ME_PIPE0_RESET, 0);
5268 		break;
5269 	case 1:
5270 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
5271 					   PFP_PIPE1_RESET, 1);
5272 		reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
5273 					   ME_PIPE1_RESET, 1);
5274 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
5275 					   PFP_PIPE1_RESET, 0);
5276 		clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
5277 					   ME_PIPE1_RESET, 0);
5278 		break;
5279 	default:
5280 		break;
5281 	}
5282 
5283 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
5284 	WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
5285 
5286 	r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
5287 					RS64_FW_UC_START_ADDR_LO;
5288 	soc24_grbm_select(adev, 0, 0, 0, 0);
5289 	mutex_unlock(&adev->srbm_mutex);
5290 	gfx_v12_0_unset_safe_mode(adev, 0);
5291 
5292 	dev_info(adev->dev, "Ring %s pipe reset %s\n", ring->name,
5293 			r == 0 ? "succeeded" : "failed");
5294 	/* Sometimes the ME start PC can't be latched correctly, so the PC
5295 	 * check serves only as a reference; the pipe reset result relies
5296 	 * on the later ring test.
5297 	 */
5298 	return 0;
5299 }
5300 
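/*
 * Kernel gfx queue reset: try a MES queue reset first, falling back to a
 * full pipe reset, then reinitialize the MQD and remap the queue through
 * MES before finishing via the ring reset helper.
 */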
5301 static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
5302 			       unsigned int vmid,
5303 			       struct amdgpu_fence *timedout_fence)
5304 {
5305 	struct amdgpu_device *adev = ring->adev;
5306 	int r;
5307 
5308 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
5309 
5310 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false, 0);
5311 	if (r) {
5312 		dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
5313 		r = gfx_v12_reset_gfx_pipe(ring);
5314 		if (r)
5315 			return r;
5316 	}
5317 
5318 	r = gfx_v12_0_kgq_init_queue(ring, true);
5319 	if (r) {
5320 		dev_err(adev->dev, "failed to init kgq\n");
5321 		return r;
5322 	}
5323 
5324 	r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
5325 	if (r) {
5326 		dev_err(adev->dev, "failed to remap kgq\n");
5327 		return r;
5328 	}
5329 
5330 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
5331 }
5332 
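/*
 * Reset a compute pipe by pulsing the matching MEC_PIPEn_RESET bit, via
 * CP_MEC_RS64_CNTL on RS64 firmware and CP_MEC_CNTL otherwise. As with
 * the gfx pipe reset, the later ring test decides the outcome.
 */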
5333 static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
5334 {
5335 	struct amdgpu_device *adev = ring->adev;
5336 	uint32_t reset_pipe = 0, clean_pipe = 0;
5337 	int r = 0;
5338 
5339 	if (!gfx_v12_pipe_reset_support(adev))
5340 		return -EOPNOTSUPP;
5341 
5342 	gfx_v12_0_set_safe_mode(adev, 0);
5343 	mutex_lock(&adev->srbm_mutex);
5344 	soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5345 
5346 	reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
5347 	clean_pipe = reset_pipe;
5348 
5349 	if (adev->gfx.rs64_enable) {
5350 		switch (ring->pipe) {
5351 		case 0:
5352 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
5353 						   MEC_PIPE0_RESET, 1);
5354 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
5355 						   MEC_PIPE0_RESET, 0);
5356 			break;
5357 		case 1:
5358 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
5359 						   MEC_PIPE1_RESET, 1);
5360 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
5361 						   MEC_PIPE1_RESET, 0);
5362 			break;
5363 		case 2:
5364 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
5365 						   MEC_PIPE2_RESET, 1);
5366 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
5367 						   MEC_PIPE2_RESET, 0);
5368 			break;
5369 		case 3:
5370 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
5371 						   MEC_PIPE3_RESET, 1);
5372 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
5373 						   MEC_PIPE3_RESET, 0);
5374 			break;
5375 		default:
5376 			break;
5377 		}
5378 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
5379 		WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
5380 		r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
5381 				RS64_FW_UC_START_ADDR_LO;
5382 	} else {
5383 		switch (ring->pipe) {
5384 		case 0:
5385 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
5386 							   MEC_ME1_PIPE0_RESET, 1);
5387 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
5388 							   MEC_ME1_PIPE0_RESET, 0);
5389 			break;
5390 		case 1:
5391 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
5392 							   MEC_ME1_PIPE1_RESET, 1);
5393 			clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
5394 							   MEC_ME1_PIPE1_RESET, 0);
5395 			break;
5396 		default:
5397 			break;
5398 		}
5399 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
5400 		WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
5401 		/* No F32 MEC instruction pointer register was found; assume
5402 		 * the driver won't run in F32 mode.
5403 		 */
5404 	}
5405 
5406 	soc24_grbm_select(adev, 0, 0, 0, 0);
5407 	mutex_unlock(&adev->srbm_mutex);
5408 	gfx_v12_0_unset_safe_mode(adev, 0);
5409 
5410 	dev_info(adev->dev, "Ring %s pipe reset %s\n", ring->name,
5411 			r == 0 ? "succeeded" : "failed");
5412 	/* The later ring test verifies the pipe reset result. */
5413 	return 0;
5414 }
5415 
5416 static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
5417 			       unsigned int vmid,
5418 			       struct amdgpu_fence *timedout_fence)
5419 {
5420 	struct amdgpu_device *adev = ring->adev;
5421 	int r;
5422 
5423 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
5424 
5425 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true, 0);
5426 	if (r) {
5427 		dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
5428 		r = gfx_v12_0_reset_compute_pipe(ring);
5429 		if (r)
5430 			return r;
5431 	}
5432 
5433 	r = gfx_v12_0_kcq_init_queue(ring, true);
5434 	if (r) {
5435 		dev_err(adev->dev, "failed to init kcq\n");
5436 		return r;
5437 	}
5438 	r = amdgpu_mes_map_legacy_queue(adev, ring, 0);
5439 	if (r) {
5440 		dev_err(adev->dev, "failed to remap kcq\n");
5441 		return r;
5442 	}
5443 
5444 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
5445 }
5446 
5447 static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
5448 {
5449 	amdgpu_gfx_profile_ring_begin_use(ring);
5450 
5451 	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
5452 }
5453 
5454 static void gfx_v12_0_ring_end_use(struct amdgpu_ring *ring)
5455 {
5456 	amdgpu_gfx_profile_ring_end_use(ring);
5457 
5458 	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
5459 }
5460 
5461 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
5462 	.name = "gfx_v12_0",
5463 	.early_init = gfx_v12_0_early_init,
5464 	.late_init = gfx_v12_0_late_init,
5465 	.sw_init = gfx_v12_0_sw_init,
5466 	.sw_fini = gfx_v12_0_sw_fini,
5467 	.hw_init = gfx_v12_0_hw_init,
5468 	.hw_fini = gfx_v12_0_hw_fini,
5469 	.suspend = gfx_v12_0_suspend,
5470 	.resume = gfx_v12_0_resume,
5471 	.is_idle = gfx_v12_0_is_idle,
5472 	.wait_for_idle = gfx_v12_0_wait_for_idle,
5473 	.set_clockgating_state = gfx_v12_0_set_clockgating_state,
5474 	.set_powergating_state = gfx_v12_0_set_powergating_state,
5475 	.get_clockgating_state = gfx_v12_0_get_clockgating_state,
5476 	.dump_ip_state = gfx_v12_ip_dump,
5477 	.print_ip_state = gfx_v12_ip_print,
5478 };
5479 
5480 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
5481 	.type = AMDGPU_RING_TYPE_GFX,
5482 	.align_mask = 0xff,
5483 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5484 	.support_64bit_ptrs = true,
5485 	.secure_submission_supported = true,
5486 	.get_rptr = gfx_v12_0_ring_get_rptr_gfx,
5487 	.get_wptr = gfx_v12_0_ring_get_wptr_gfx,
5488 	.set_wptr = gfx_v12_0_ring_set_wptr_gfx,
5489 	.emit_frame_size = /* totally 242 maximum if 16 IBs */
5490 		5 + /* COND_EXEC */
5491 		7 + /* PIPELINE_SYNC */
5492 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5493 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5494 		2 + /* VM_FLUSH */
5495 		8 + /* FENCE for VM_FLUSH */
5496 		5 + /* COND_EXEC */
5497 		7 + /* HDP_flush */
5498 		4 + /* VGT_flush */
5499 		31 + /*	DE_META */
5500 		3 + /* CNTX_CTRL */
5501 		5 + /* HDP_INVL */
5502 		8 + 8 + /* FENCE x2 */
5503 		8 + /* gfx_v12_0_emit_mem_sync */
5504 		2, /* gfx_v12_0_ring_emit_cleaner_shader */
5505 	.emit_ib_size =	4, /* gfx_v12_0_ring_emit_ib_gfx */
5506 	.emit_ib = gfx_v12_0_ring_emit_ib_gfx,
5507 	.emit_fence = gfx_v12_0_ring_emit_fence,
5508 	.emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync,
5509 	.emit_vm_flush = gfx_v12_0_ring_emit_vm_flush,
5510 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
5511 	.test_ring = gfx_v12_0_ring_test_ring,
5512 	.test_ib = gfx_v12_0_ring_test_ib,
5513 	.insert_nop = gfx_v12_ring_insert_nop,
5514 	.pad_ib = amdgpu_ring_generic_pad_ib,
5515 	.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
5516 	.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
5517 	.preempt_ib = gfx_v12_0_ring_preempt_ib,
5518 	.emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl,
5519 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
5520 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
5521 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
5522 	.emit_mem_sync = gfx_v12_0_emit_mem_sync,
5523 	.reset = gfx_v12_0_reset_kgq,
5524 	.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
5525 	.begin_use = gfx_v12_0_ring_begin_use,
5526 	.end_use = gfx_v12_0_ring_end_use,
5527 };
5528 
5529 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
5530 	.type = AMDGPU_RING_TYPE_COMPUTE,
5531 	.align_mask = 0xff,
5532 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5533 	.support_64bit_ptrs = true,
5534 	.get_rptr = gfx_v12_0_ring_get_rptr_compute,
5535 	.get_wptr = gfx_v12_0_ring_get_wptr_compute,
5536 	.set_wptr = gfx_v12_0_ring_set_wptr_compute,
5537 	.emit_frame_size =
5538 		7 + /* gfx_v12_0_ring_emit_hdp_flush */
5539 		5 + /* hdp invalidate */
5540 		7 + /* gfx_v12_0_ring_emit_pipeline_sync */
5541 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5542 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5543 		2 + /* gfx_v12_0_ring_emit_vm_flush */
5544 		8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */
5545 		8 + /* gfx_v12_0_emit_mem_sync */
5546 		2, /* gfx_v12_0_ring_emit_cleaner_shader */
5547 	.emit_ib_size =	7, /* gfx_v12_0_ring_emit_ib_compute */
5548 	.emit_ib = gfx_v12_0_ring_emit_ib_compute,
5549 	.emit_fence = gfx_v12_0_ring_emit_fence,
5550 	.emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync,
5551 	.emit_vm_flush = gfx_v12_0_ring_emit_vm_flush,
5552 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
5553 	.test_ring = gfx_v12_0_ring_test_ring,
5554 	.test_ib = gfx_v12_0_ring_test_ib,
5555 	.insert_nop = gfx_v12_ring_insert_nop,
5556 	.pad_ib = amdgpu_ring_generic_pad_ib,
5557 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
5558 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
5559 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
5560 	.emit_mem_sync = gfx_v12_0_emit_mem_sync,
5561 	.reset = gfx_v12_0_reset_kcq,
5562 	.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
5563 	.begin_use = gfx_v12_0_ring_begin_use,
5564 	.end_use = gfx_v12_0_ring_end_use,
5565 };
5566 
5567 static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
5568 	.type = AMDGPU_RING_TYPE_KIQ,
5569 	.align_mask = 0xff,
5570 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5571 	.support_64bit_ptrs = true,
5572 	.get_rptr = gfx_v12_0_ring_get_rptr_compute,
5573 	.get_wptr = gfx_v12_0_ring_get_wptr_compute,
5574 	.set_wptr = gfx_v12_0_ring_set_wptr_compute,
5575 	.emit_frame_size =
5576 		7 + /* gfx_v12_0_ring_emit_hdp_flush */
5577 		5 + /* hdp invalidate */
5578 		7 + /* gfx_v12_0_ring_emit_pipeline_sync */
5579 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5580 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5581 		2 + /* gfx_v12_0_ring_emit_vm_flush */
5582 		8 + 8 + 8, /* gfx_v12_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5583 	.emit_ib_size =	7, /* gfx_v12_0_ring_emit_ib_compute */
5584 	.emit_ib = gfx_v12_0_ring_emit_ib_compute,
5585 	.emit_fence = gfx_v12_0_ring_emit_fence_kiq,
5586 	.test_ring = gfx_v12_0_ring_test_ring,
5587 	.test_ib = gfx_v12_0_ring_test_ib,
5588 	.insert_nop = amdgpu_ring_insert_nop,
5589 	.pad_ib = amdgpu_ring_generic_pad_ib,
5590 	.emit_rreg = gfx_v12_0_ring_emit_rreg,
5591 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
5592 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
5593 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
5594 	.emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
5595 };
5596 
5597 static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
5598 {
5599 	int i;
5600 
5601 	adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq;
5602 
5603 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5604 		adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx;
5605 
5606 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5607 		adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute;
5608 }
5609 
5610 static const struct amdgpu_irq_src_funcs gfx_v12_0_eop_irq_funcs = {
5611 	.set = gfx_v12_0_set_eop_interrupt_state,
5612 	.process = gfx_v12_0_eop_irq,
5613 };
5614 
5615 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = {
5616 	.set = gfx_v12_0_set_priv_reg_fault_state,
5617 	.process = gfx_v12_0_priv_reg_irq,
5618 };
5619 
5620 static const struct amdgpu_irq_src_funcs gfx_v12_0_bad_op_irq_funcs = {
5621 	.set = gfx_v12_0_set_bad_op_fault_state,
5622 	.process = gfx_v12_0_bad_op_irq,
5623 };
5624 
5625 static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = {
5626 	.set = gfx_v12_0_set_priv_inst_fault_state,
5627 	.process = gfx_v12_0_priv_inst_irq,
5628 };
5629 
5630 static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev)
5631 {
5632 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5633 	adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs;
5634 
5635 	adev->gfx.priv_reg_irq.num_types = 1;
5636 	adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs;
5637 
5638 	adev->gfx.bad_op_irq.num_types = 1;
5639 	adev->gfx.bad_op_irq.funcs = &gfx_v12_0_bad_op_irq_funcs;
5640 
5641 	adev->gfx.priv_inst_irq.num_types = 1;
5642 	adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs;
5643 }
5644 
5645 static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev)
5646 {
5647 	if (adev->flags & AMD_IS_APU)
5648 		adev->gfx.imu.mode = MISSION_MODE;
5649 	else
5650 		adev->gfx.imu.mode = DEBUG_MODE;
5651 
5652 	adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs;
5653 }
5654 
5655 static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev)
5656 {
5657 	adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs;
5658 }
5659 
5660 static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev)
5661 {
5662 	/* set gfx eng mqd */
5663 	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
5664 		sizeof(struct v12_gfx_mqd);
5665 	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
5666 		gfx_v12_0_gfx_mqd_init;
5667 	/* set compute eng mqd */
5668 	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
5669 		sizeof(struct v12_compute_mqd);
5670 	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
5671 		gfx_v12_0_compute_mqd_init;
5672 }
5673 
5674 static void gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5675 							  u32 bitmap)
5676 {
5677 	u32 data;
5678 
5679 	if (!bitmap)
5680 		return;
5681 
5682 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5683 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5684 
5685 	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
5686 }
5687 
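/*
 * A WGP may be inactive because it is fused off (CC_GC_SHADER_ARRAY_CONFIG)
 * or disabled by the user (GC_USER_SHADER_ARRAY_CONFIG). OR the two masks
 * and invert against the theoretical WGP count (two CUs per WGP) to get
 * the active bitmap.
 */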
5688 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5689 {
5690 	u32 data, wgp_bitmask;
5691 	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
5692 	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
5693 
5694 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5695 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5696 
5697 	wgp_bitmask =
5698 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5699 
5700 	return (~data) & wgp_bitmask;
5701 }
5702 
5703 static u32 gfx_v12_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5704 {
5705 	u32 wgp_idx, wgp_active_bitmap;
5706 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
5707 
5708 	wgp_active_bitmap = gfx_v12_0_get_wgp_active_bitmap_per_sh(adev);
5709 	cu_active_bitmap = 0;
5710 
5711 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
5712 		/* if there is one WGP enabled, it means 2 CUs will be enabled */
5713 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5714 		if (wgp_active_bitmap & (1 << wgp_idx))
5715 			cu_active_bitmap |= cu_bitmap_per_wgp;
5716 	}
5717 
5718 	return cu_active_bitmap;
5719 }
5720 
5721 static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev,
5722 				 struct amdgpu_cu_info *cu_info)
5723 {
5724 	int i, j, k, counter, active_cu_number = 0;
5725 	u32 mask, bitmap;
5726 	unsigned disable_masks[8 * 2];
5727 
5728 	if (!adev || !cu_info)
5729 		return -EINVAL;
5730 
5731 	amdgpu_gfx_parse_disable_cu(adev, disable_masks, 8, 2);
5732 
5733 	mutex_lock(&adev->grbm_idx_mutex);
5734 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5735 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5736 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
5737 			if (!((gfx_v12_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
5738 				continue;
5739 			mask = 1;
5740 			counter = 0;
5741 			gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0);
5742 			if (i < 8 && j < 2)
5743 				gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(
5744 					adev, disable_masks[i * 2 + j]);
5745 			bitmap = gfx_v12_0_get_cu_active_bitmap_per_sh(adev);
5746 
5747 			/**
5748 			 * GFX12 could support more than 4 SEs, while the bitmap
5749 			 * in cu_info struct is 4x4 and ioctl interface struct
5750 			 * drm_amdgpu_info_device should keep stable.
5751 			 * So we use last two columns of bitmap to store cu mask for
5752 			 * SEs 4 to 7, the layout of the bitmap is as below:
5753 			 *    SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
5754 			 *    SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
5755 			 *    SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
5756 			 *    SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
5757 			 *    SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
5758 			 *    SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
5759 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
5760 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
5761 			 */
5762 			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
5763 
5764 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5765 				if (bitmap & mask)
5766 					counter++;
5767 
5768 				mask <<= 1;
5769 			}
5770 			active_cu_number += counter;
5771 		}
5772 	}
5773 	gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
5774 	mutex_unlock(&adev->grbm_idx_mutex);
5775 
5776 	cu_info->number = active_cu_number;
5777 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5778 
5779 	return 0;
5780 }
5781 
5782 const struct amdgpu_ip_block_version gfx_v12_0_ip_block = {
5783 	.type = AMD_IP_BLOCK_TYPE_GFX,
5784 	.major = 12,
5785 	.minor = 0,
5786 	.rev = 0,
5787 	.funcs = &gfx_v12_0_ip_funcs,
5788 };
5789