/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

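/*
 * Ring-count and firmware-load constants. As a rough guide (an informed
 * reading of how this file uses them, not authoritative documentation):
 * GFX9_MEC_HPD_SIZE appears to be the per-compute-queue EOP buffer size
 * reserved for the MEC, and RLCG_UCODE_LOADING_START_ADDRESS the start
 * address programmed into the RLC ucode address register before the RLC
 * microcode is streamed in.
 */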
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_NUM_SW_GFX_RINGS 2
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");

#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_2_ARCT 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_3_ARCT 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_4_ARCT 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0

#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1

static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
	/* packet headers */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};

enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
	[AMDGPU_RAS_BLOCK__##subblock] = { \
		#subblock, \
		TA_RAS_BLOCK__##subblock, \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
	}
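
/*
 * The two packed fields above appear to follow the amdgpu_ras_error_type
 * bit layout (PARITY = 1 << 0, SINGLE_CORRECTABLE = 1 << 1,
 * MULTI_UNCORRECTABLE = 1 << 2, POISON = 1 << 3), with (a..d) giving the
 * hardware-supported error types and (e..h) the software-supported ones.
 * As a worked example (a sketch of the macro expansion, not extra
 * functionality), the first table entry below,
 *
 *	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
 *
 * yields hw_supported_error_type = 0xe (single-correctable,
 * multi-uncorrectable and poison) and sw_supported_error_type = 0x6.
 */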

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

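/*
 * Golden-settings tables. Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg,
 * mask, value) entry is applied by soc15_program_register_sequence()
 * as a read-modify-write; the effect is roughly (a sketch of the
 * helper's behavior, not a verbatim copy):
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~mask;
 *	tmp |= (value & mask);
 *	WREG32(reg, tmp);
 *
 * so only the bits selected by the mask are forced to the golden value.
 */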
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
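
/*
 * The two tables above hold the offsets of the RLC_SRM_INDEX_CNTL
 * ADDR/DATA register pairs relative to instance 0 (entry 0 is
 * deliberately 0), so pair i can be programmed with something like
 * (an illustrative use of the SOC15 offset helper, not code copied
 * from this file):
 *
 *	WREG32_SOC15_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0,
 *			    GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], addr);
 */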

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
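/*
 * Expected GB_ADDR_CONFIG values once the golden settings above have been
 * applied; these are presumably used to seed the driver's cached
 * gb_addr_config for each ASIC family (an inference from the matching
 * golden-table values, not documented behavior).
 */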

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
					      unsigned int vmid);
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				       uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0, queue_type:0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
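
/*
 * The SET_RESOURCES stream above is one PACKET3 header plus seven payload
 * DWORDs, which matches .set_resources_size = 8 in gfx_v9_0_kiq_pm4_funcs
 * below; the two must stay in sync so the KIQ reserves enough ring space
 * before emitting the packet.
 */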

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				    struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring,
				      enum amdgpu_unmap_queues_action action,
				      u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);

	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring,
				      u64 addr,
				      u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}


static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait until the dequeue request takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait for HQD to deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};
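
/*
 * The shared KIQ code dispatches through this table; a typical map path
 * (a simplified sketch of the generic helper, not a verbatim copy)
 * reserves kiq->pmf->map_queues_size DWORDs on the KIQ ring and then
 * calls:
 *
 *	kiq->pmf->kiq_map_queues(kiq_ring, ring);
 *
 * which is why each emit hook has a matching *_size entry giving its
 * packet length in DWORDs.
 */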

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case IP_VERSION(9, 2, 1):
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case IP_VERSION(9, 4, 1):
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case IP_VERSION(9, 3, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* Renoir does not need the common golden settings */
	case IP_VERSION(9, 4, 2):
		gfx_v9_4_2_init_golden_registers(adev,
						 adev->smuio.funcs->get_die_id(adev));
		break;
	default:
		break;
	}

	if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
1171
1172 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1173 int mem_space, int opt, uint32_t addr0,
1174 uint32_t addr1, uint32_t ref, uint32_t mask,
1175 uint32_t inv)
1176 {
1177 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1178 amdgpu_ring_write(ring,
1179 /* memory (1) or register (0) */
1180 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1181 WAIT_REG_MEM_OPERATION(opt) | /* wait */
1182 WAIT_REG_MEM_FUNCTION(3) | /* equal */
1183 WAIT_REG_MEM_ENGINE(eng_sel)));
1184
1185 if (mem_space)
1186 BUG_ON(addr0 & 0x3); /* Dword align */
1187 amdgpu_ring_write(ring, addr0);
1188 amdgpu_ring_write(ring, addr1);
1189 amdgpu_ring_write(ring, ref);
1190 amdgpu_ring_write(ring, mask);
1191 amdgpu_ring_write(ring, inv); /* poll interval */
1192 }
1193
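/*
 * Basic ring liveness test: write 0xDEADBEEF to SCRATCH_REG0 through
 * the ring and poll the register until the value shows up or the
 * timeout expires.
 */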
1194 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1195 {
1196 struct amdgpu_device *adev = ring->adev;
1197 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1198 uint32_t tmp = 0;
1199 unsigned i;
1200 int r;
1201
1202 WREG32(scratch, 0xCAFEDEAD);
1203 r = amdgpu_ring_alloc(ring, 3);
1204 if (r)
1205 return r;
1206
1207 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1208 amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
1209 amdgpu_ring_write(ring, 0xDEADBEEF);
1210 amdgpu_ring_commit(ring);
1211
1212 for (i = 0; i < adev->usec_timeout; i++) {
1213 tmp = RREG32(scratch);
1214 if (tmp == 0xDEADBEEF)
1215 break;
1216 udelay(1);
1217 }
1218
1219 if (i >= adev->usec_timeout)
1220 r = -ETIMEDOUT;
1221 return r;
1222 }
1223
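/*
 * Indirect buffer test: submit a small IB that writes 0xDEADBEEF to a
 * writeback slot, wait on the fence, then check that the value landed.
 * This exercises the CP's IB fetch/execute path, not just the ring.
 */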
1224 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1225 {
1226 struct amdgpu_device *adev = ring->adev;
1227 struct amdgpu_ib ib;
1228 struct dma_fence *f = NULL;
1229
1230 unsigned index;
1231 uint64_t gpu_addr;
1232 uint32_t tmp;
1233 long r;
1234
1235 r = amdgpu_device_wb_get(adev, &index);
1236 if (r)
1237 return r;
1238
1239 gpu_addr = adev->wb.gpu_addr + (index * 4);
1240 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1241 memset(&ib, 0, sizeof(ib));
1242
1243 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
1244 if (r)
1245 goto err1;
1246
1247 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1248 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1249 ib.ptr[2] = lower_32_bits(gpu_addr);
1250 ib.ptr[3] = upper_32_bits(gpu_addr);
1251 ib.ptr[4] = 0xDEADBEEF;
1252 ib.length_dw = 5;
1253
1254 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1255 if (r)
1256 goto err2;
1257
1258 r = dma_fence_wait_timeout(f, false, timeout);
1259 if (r == 0) {
1260 r = -ETIMEDOUT;
1261 goto err2;
1262 } else if (r < 0) {
1263 goto err2;
1264 }
1265
1266 tmp = adev->wb.wb[index];
1267 if (tmp == 0xDEADBEEF)
1268 r = 0;
1269 else
1270 r = -EINVAL;
1271
1272 err2:
1273 amdgpu_ib_free(&ib, NULL);
1274 dma_fence_put(f);
1275 err1:
1276 amdgpu_device_wb_free(adev, index);
1277 return r;
1278 }
1279
1280
1281 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1282 {
1283 amdgpu_ucode_release(&adev->gfx.pfp_fw);
1284 amdgpu_ucode_release(&adev->gfx.me_fw);
1285 amdgpu_ucode_release(&adev->gfx.ce_fw);
1286 amdgpu_ucode_release(&adev->gfx.rlc_fw);
1287 amdgpu_ucode_release(&adev->gfx.mec_fw);
1288 amdgpu_ucode_release(&adev->gfx.mec2_fw);
1289
1290 kfree(adev->gfx.rlc.register_list_format);
1291 }
1292
1293 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1294 {
1295 adev->gfx.me_fw_write_wait = false;
1296 adev->gfx.mec_fw_write_wait = false;
1297
1298 if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
1299 (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
1300 ((adev->gfx.mec_fw_version < 0x000001a5) ||
1301 (adev->gfx.mec_feature_version < 46) ||
1302 (adev->gfx.pfp_fw_version < 0x000000b7) ||
1303 (adev->gfx.pfp_feature_version < 46)))
1304 drm_warn_once(adev_to_drm(adev),
1305 "CP firmware version too old, please update!");
1306
1307 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1308 case IP_VERSION(9, 0, 1):
1309 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1310 (adev->gfx.me_feature_version >= 42) &&
1311 (adev->gfx.pfp_fw_version >= 0x000000b1) &&
1312 (adev->gfx.pfp_feature_version >= 42))
1313 adev->gfx.me_fw_write_wait = true;
1314
1315 if ((adev->gfx.mec_fw_version >= 0x00000193) &&
1316 (adev->gfx.mec_feature_version >= 42))
1317 adev->gfx.mec_fw_write_wait = true;
1318 break;
1319 case IP_VERSION(9, 2, 1):
1320 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1321 (adev->gfx.me_feature_version >= 44) &&
1322 (adev->gfx.pfp_fw_version >= 0x000000b2) &&
1323 (adev->gfx.pfp_feature_version >= 44))
1324 adev->gfx.me_fw_write_wait = true;
1325
1326 if ((adev->gfx.mec_fw_version >= 0x00000196) &&
1327 (adev->gfx.mec_feature_version >= 44))
1328 adev->gfx.mec_fw_write_wait = true;
1329 break;
1330 case IP_VERSION(9, 4, 0):
1331 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1332 (adev->gfx.me_feature_version >= 44) &&
1333 (adev->gfx.pfp_fw_version >= 0x000000b2) &&
1334 (adev->gfx.pfp_feature_version >= 44))
1335 adev->gfx.me_fw_write_wait = true;
1336
1337 if ((adev->gfx.mec_fw_version >= 0x00000197) &&
1338 (adev->gfx.mec_feature_version >= 44))
1339 adev->gfx.mec_fw_write_wait = true;
1340 break;
1341 case IP_VERSION(9, 1, 0):
1342 case IP_VERSION(9, 2, 2):
1343 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1344 (adev->gfx.me_feature_version >= 42) &&
1345 (adev->gfx.pfp_fw_version >= 0x000000b1) &&
1346 (adev->gfx.pfp_feature_version >= 42))
1347 adev->gfx.me_fw_write_wait = true;
1348
1349 if ((adev->gfx.mec_fw_version >= 0x00000192) &&
1350 (adev->gfx.mec_feature_version >= 42))
1351 adev->gfx.mec_fw_write_wait = true;
1352 break;
1353 default:
1354 adev->gfx.me_fw_write_wait = true;
1355 adev->gfx.mec_fw_write_wait = true;
1356 break;
1357 }
1358 }
1359
1360 struct amdgpu_gfxoff_quirk {
1361 u16 chip_vendor;
1362 u16 chip_device;
1363 u16 subsys_vendor;
1364 u16 subsys_device;
1365 u8 revision;
1366 };
1367
1368 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1369 /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1370 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1371 /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1372 { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1373 /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1374 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1375 /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
1376 { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
1377 /* https://bbs.openkylin.top/t/topic/171497 */
1378 { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
1379 /* HP 705G4 DM with R5 2400G */
1380 { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
1381 { 0, 0, 0, 0, 0 },
1382 };
1383
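/*
 * Return true if this device (vendor/device/subsystem IDs plus
 * revision) matches an entry in the GFXOFF quirk list above.
 */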
1384 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1385 {
1386 const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1387
1388 while (p && p->chip_device != 0) {
1389 if (pdev->vendor == p->chip_vendor &&
1390 pdev->device == p->chip_device &&
1391 pdev->subsystem_vendor == p->subsys_vendor &&
1392 pdev->subsystem_device == p->subsys_device &&
1393 pdev->revision == p->revision) {
1394 return true;
1395 }
1396 ++p;
1397 }
1398 return false;
1399 }
1400
1401 static bool is_raven_kicker(struct amdgpu_device *adev)
1402 {
1403 return adev->pm.fw_version >= 0x41e2b;
1407 }
1408
1409 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1410 {
1411 return (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&
1412 (adev->gfx.me_fw_version >= 0x000000a5) &&
1413 (adev->gfx.me_feature_version >= 52);
1417 }
1418
1419 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1420 {
1421 if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1422 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1423
1424 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1425 case IP_VERSION(9, 0, 1):
1426 case IP_VERSION(9, 2, 1):
1427 case IP_VERSION(9, 4, 0):
1428 break;
1429 case IP_VERSION(9, 2, 2):
1430 case IP_VERSION(9, 1, 0):
1431 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1432 (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1433 ((!is_raven_kicker(adev) &&
1434 adev->gfx.rlc_fw_version < 531) ||
1435 (adev->gfx.rlc_feature_version < 1) ||
1436 !adev->gfx.rlc.is_rlc_v2_1))
1437 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1438
1439 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1440 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1441 AMD_PG_SUPPORT_CP |
1442 AMD_PG_SUPPORT_RLC_SMU_HS;
1443 break;
1444 case IP_VERSION(9, 3, 0):
1445 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1446 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1447 AMD_PG_SUPPORT_CP |
1448 AMD_PG_SUPPORT_RLC_SMU_HS;
1449 break;
1450 default:
1451 break;
1452 }
1453 }
1454
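/*
 * Request and set up the CP graphics microcode (PFP, ME and CE);
 * release all three firmware handles if any request fails.
 */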
1455 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1456 char *chip_name)
1457 {
1458 int err;
1459
1460 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
1461 AMDGPU_UCODE_REQUIRED,
1462 "amdgpu/%s_pfp.bin", chip_name);
1463 if (err)
1464 goto out;
1465 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
1466
1467 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1468 AMDGPU_UCODE_REQUIRED,
1469 "amdgpu/%s_me.bin", chip_name);
1470 if (err)
1471 goto out;
1472 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
1473
1474 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1475 AMDGPU_UCODE_REQUIRED,
1476 "amdgpu/%s_ce.bin", chip_name);
1477 if (err)
1478 goto out;
1479 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
1480
1481 out:
1482 if (err) {
1483 amdgpu_ucode_release(&adev->gfx.pfp_fw);
1484 amdgpu_ucode_release(&adev->gfx.me_fw);
1485 amdgpu_ucode_release(&adev->gfx.ce_fw);
1486 }
1487 return err;
1488 }
1489
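/*
 * Request the RLC microcode, selecting a board-specific binary where
 * needed, then parse the header to initialize the RLC tables.
 */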
1490 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1491 char *chip_name)
1492 {
1493 int err;
1494 const struct rlc_firmware_header_v2_0 *rlc_hdr;
1495 uint16_t version_major;
1496 uint16_t version_minor;
1497 uint32_t smu_version;
1498
1499 /*
1500 * For Picasso on an AM4-socket board, use picasso_rlc_am4.bin
1501 * instead of picasso_rlc.bin.
1502 * Detection:
1503 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1504 * or revision >= 0xD8 && revision <= 0xDF
1505 * otherwise it is PCO FP5
1506 */
1507 if (!strcmp(chip_name, "picasso") &&
1508 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1509 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1510 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1511 AMDGPU_UCODE_REQUIRED,
1512 "amdgpu/%s_rlc_am4.bin", chip_name);
1513 else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1514 (smu_version >= 0x41e2b))
1515 /* SMC is loaded by the SBIOS on APUs, so the SMU version can be
1516 * queried directly.
1517 */
1518 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1519 AMDGPU_UCODE_REQUIRED,
1520 "amdgpu/%s_kicker_rlc.bin", chip_name);
1521 else
1522 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1523 AMDGPU_UCODE_REQUIRED,
1524 "amdgpu/%s_rlc.bin", chip_name);
1525 if (err)
1526 goto out;
1527
1528 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1529 version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1530 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1531 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
1532 out:
1533 if (err)
1534 amdgpu_ucode_release(&adev->gfx.rlc_fw);
1535
1536 return err;
1537 }
1538
1539 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1540 {
1541 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
1542 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1543 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))
1544 return false;
1545
1546 return true;
1547 }
1548
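/*
 * Request and set up the compute (MEC) microcode. SR-IOV Aldebaran
 * uses the sjt variants; MEC2 is optional, and parts without a
 * separate MEC2 binary reuse the MEC1 version info.
 */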
1549 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1550 char *chip_name)
1551 {
1552 int err;
1553
1554 if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1555 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1556 AMDGPU_UCODE_REQUIRED,
1557 "amdgpu/%s_sjt_mec.bin", chip_name);
1558 else
1559 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1560 AMDGPU_UCODE_REQUIRED,
1561 "amdgpu/%s_mec.bin", chip_name);
1562 if (err)
1563 goto out;
1564
1565 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
1566 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
1567
1568 if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1569 if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1570 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1571 AMDGPU_UCODE_REQUIRED,
1572 "amdgpu/%s_sjt_mec2.bin", chip_name);
1573 else
1574 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1575 AMDGPU_UCODE_REQUIRED,
1576 "amdgpu/%s_mec2.bin", chip_name);
1577 if (!err) {
1578 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
1579 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
1580 } else {
1581 err = 0;
1582 amdgpu_ucode_release(&adev->gfx.mec2_fw);
1583 }
1584 } else {
1585 adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1586 adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1587 }
1588
1589 gfx_v9_0_check_if_need_gfxoff(adev);
1590 gfx_v9_0_check_fw_write_wait(adev);
1591
1592 out:
1593 if (err)
1594 amdgpu_ucode_release(&adev->gfx.mec_fw);
1595 return err;
1596 }
1597
1598 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1599 {
1600 char ucode_prefix[30];
1601 int r;
1602
1603 DRM_DEBUG("\n");
1604 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
1605
1606 /* No CPG in Arcturus */
1607 if (adev->gfx.num_gfx_rings) {
1608 r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
1609 if (r)
1610 return r;
1611 }
1612
1613 r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
1614 if (r)
1615 return r;
1616
1617 return gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
1622 }
1623
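/*
 * Return the clear-state buffer size in dwords: the preamble, the
 * SECT_CONTEXT extents from the gfx9 clear-state table and the
 * trailing clear-state packets.
 */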
1624 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1625 {
1626 u32 count = 0;
1627 const struct cs_section_def *sect = NULL;
1628 const struct cs_extent_def *ext = NULL;
1629
1630 /* begin clear state */
1631 count += 2;
1632 /* context control state */
1633 count += 3;
1634
1635 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1636 for (ext = sect->section; ext->extent != NULL; ++ext) {
1637 if (sect->id == SECT_CONTEXT)
1638 count += 2 + ext->reg_count;
1639 else
1640 return 0;
1641 }
1642 }
1643
1644 /* end clear state */
1645 count += 2;
1646 /* clear state */
1647 count += 2;
1648
1649 return count;
1650 }
1651
1652 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
1653 {
1654 u32 count = 0;
1655
1656 if (adev->gfx.rlc.cs_data == NULL)
1657 return;
1658 if (buffer == NULL)
1659 return;
1660
1661 count = amdgpu_gfx_csb_preamble_start(buffer);
1662 count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
1663 amdgpu_gfx_csb_preamble_end(buffer, count);
1664 }
1665
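/*
 * Program the per-SE/SH always-on CU masks: the first two CUs stay on
 * for power gating (RLC_PG_ALWAYS_ON_CU_MASK) and the first
 * always_on_cu_num CUs stay active for load balancing
 * (RLC_LB_ALWAYS_ACTIVE_CU_MASK).
 */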
1666 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1667 {
1668 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1669 uint32_t pg_always_on_cu_num = 2;
1670 uint32_t always_on_cu_num;
1671 uint32_t i, j, k;
1672 uint32_t mask, cu_bitmap, counter;
1673
1674 if (adev->flags & AMD_IS_APU)
1675 always_on_cu_num = 4;
1676 else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))
1677 always_on_cu_num = 8;
1678 else
1679 always_on_cu_num = 12;
1680
1681 mutex_lock(&adev->grbm_idx_mutex);
1682 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1683 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1684 mask = 1;
1685 cu_bitmap = 0;
1686 counter = 0;
1687 amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
1688
1689 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1690 if (cu_info->bitmap[0][i][j] & mask) {
1691 if (counter == pg_always_on_cu_num)
1692 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1693 if (counter < always_on_cu_num)
1694 cu_bitmap |= mask;
1695 else
1696 break;
1697 counter++;
1698 }
1699 mask <<= 1;
1700 }
1701
1702 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1703 cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1704 }
1705 }
1706 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1707 mutex_unlock(&adev->grbm_idx_mutex);
1708 }
1709
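/*
 * Program the RLC load-balance-per-watt (LBPW) thresholds, counters
 * and CU masks for Raven-class parts.
 */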
1710 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1711 {
1712 uint32_t data;
1713
1714 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1715 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1716 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1717 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1718 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1719
1720 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1721 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1722
1723 /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1724 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1725
1726 mutex_lock(&adev->grbm_idx_mutex);
1727 /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1728 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1729 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1730
1731 /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1732 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1733 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1734 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1735 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1736
1737 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1738 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1739 data &= 0x0000FFFF;
1740 data |= 0x00C00000;
1741 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1742
1743 /*
1744 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1745 * programmed in gfx_v9_0_init_always_on_cu_mask()
1746 */
1747
1748 /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1749 * but used for RLC_LB_CNTL configuration */
1750 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1751 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1752 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1753 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1754 mutex_unlock(&adev->grbm_idx_mutex);
1755
1756 gfx_v9_0_init_always_on_cu_mask(adev);
1757 }
1758
1759 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1760 {
1761 uint32_t data;
1762
1763 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1764 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1765 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1766 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1767 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1768
1769 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1770 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1771
1772 /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1773 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1774
1775 mutex_lock(&adev->grbm_idx_mutex);
1776 /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1777 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1778 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1779
1780 /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1781 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1782 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1783 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1784 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1785
1786 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1787 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1788 data &= 0x0000FFFF;
1789 data |= 0x00C00000;
1790 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1791
1792 /*
1793 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1794 * programmed in gfx_v9_0_init_always_on_cu_mask()
1795 */
1796
1797 /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1798 * but used for RLC_LB_CNTL configuration */
1799 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1800 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1801 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1802 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1803 mutex_unlock(&adev->grbm_idx_mutex);
1804
1805 gfx_v9_0_init_always_on_cu_mask(adev);
1806 }
1807
1808 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1809 {
1810 WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1811 }
1812
1813 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1814 {
1815 if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1816 return 5;
1817 else
1818 return 4;
1819 }
1820
1821 static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1822 {
1823 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1824
1825 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
1826 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1827 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
1828 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
1829 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
1830 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
1831 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
1832 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
1833 adev->gfx.rlc.rlcg_reg_access_supported = true;
1834 }
1835
1836 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1837 {
1838 const struct cs_section_def *cs_data;
1839 int r;
1840
1841 adev->gfx.rlc.cs_data = gfx9_cs_data;
1842
1843 cs_data = adev->gfx.rlc.cs_data;
1844
1845 if (cs_data) {
1846 /* init clear state block */
1847 r = amdgpu_gfx_rlc_init_csb(adev);
1848 if (r)
1849 return r;
1850 }
1851
1852 if (adev->flags & AMD_IS_APU) {
1853 /* TODO: double check the cp_table_size for RV */
1854 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1855 r = amdgpu_gfx_rlc_init_cpt(adev);
1856 if (r)
1857 return r;
1858 }
1859
1860 return 0;
1861 }
1862
1863 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1864 {
1865 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1866 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1867 }
1868
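/*
 * Set up MEC resources: take ownership of the compute queues, create
 * the HPD EOP buffer backing all compute rings and stage the MEC
 * firmware image in a GTT buffer.
 */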
1869 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1870 {
1871 int r;
1872 u32 *hpd;
1873 const __le32 *fw_data;
1874 unsigned fw_size;
1875 u32 *fw;
1876 size_t mec_hpd_size;
1877
1878 const struct gfx_firmware_header_v1_0 *mec_hdr;
1879
1880 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1881
1882 /* take ownership of the relevant compute queues */
1883 amdgpu_gfx_compute_queue_acquire(adev);
1884 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1885 if (mec_hpd_size) {
1886 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1887 AMDGPU_GEM_DOMAIN_VRAM |
1888 AMDGPU_GEM_DOMAIN_GTT,
1889 &adev->gfx.mec.hpd_eop_obj,
1890 &adev->gfx.mec.hpd_eop_gpu_addr,
1891 (void **)&hpd);
1892 if (r) {
1893 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1894 gfx_v9_0_mec_fini(adev);
1895 return r;
1896 }
1897
1898 memset(hpd, 0, mec_hpd_size);
1899
1900 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1901 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1902 }
1903
1904 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1905
1906 fw_data = (const __le32 *)
1907 (adev->gfx.mec_fw->data +
1908 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1909 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1910
1911 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1912 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1913 &adev->gfx.mec.mec_fw_obj,
1914 &adev->gfx.mec.mec_fw_gpu_addr,
1915 (void **)&fw);
1916 if (r) {
1917 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1918 gfx_v9_0_mec_fini(adev);
1919 return r;
1920 }
1921
1922 memcpy(fw, fw_data, fw_size);
1923
1924 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1925 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1926
1927 return 0;
1928 }
1929
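/* Read one SQ indirect register for the given SIMD/wave. */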
1930 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1931 {
1932 WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1933 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1934 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1935 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1936 (SQ_IND_INDEX__FORCE_READ_MASK));
1937 return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1938 }
1939
1940 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1941 uint32_t wave, uint32_t thread,
1942 uint32_t regno, uint32_t num, uint32_t *out)
1943 {
1944 WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1945 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1946 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1947 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1948 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1949 (SQ_IND_INDEX__FORCE_READ_MASK) |
1950 (SQ_IND_INDEX__AUTO_INCR_MASK));
1951 while (num--)
1952 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1953 }
1954
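/*
 * Capture the standard wave-status registers (PC, EXEC, HW_ID, ...)
 * for one wave; dst[0] = 1 marks the layout as type-1 wave data.
 */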
1955 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1956 {
1957 /* type 1 wave data */
1958 dst[(*no_fields)++] = 1;
1959 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1960 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1961 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1962 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1963 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1964 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1965 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1966 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1967 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1968 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1969 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1970 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1971 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1972 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1973 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
1974 }
1975
1976 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1977 uint32_t wave, uint32_t start,
1978 uint32_t size, uint32_t *dst)
1979 {
1980 wave_read_regs(
1981 adev, simd, wave, 0,
1982 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1983 }
1984
1985 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
1986 uint32_t wave, uint32_t thread,
1987 uint32_t start, uint32_t size,
1988 uint32_t *dst)
1989 {
1990 wave_read_regs(
1991 adev, simd, wave, thread,
1992 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1993 }
1994
1995 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1996 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
1997 {
1998 soc15_grbm_select(adev, me, pipe, q, vm, 0);
1999 }
2000
2001 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2002 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2003 .select_se_sh = &gfx_v9_0_select_se_sh,
2004 .read_wave_data = &gfx_v9_0_read_wave_data,
2005 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2006 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2007 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2008 .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
2009 };
2010
2011 const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
2012 .ras_error_inject = &gfx_v9_0_ras_error_inject,
2013 .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2014 .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2015 };
2016
2017 static struct amdgpu_gfx_ras gfx_v9_0_ras = {
2018 .ras_block = {
2019 .hw_ops = &gfx_v9_0_ras_ops,
2020 },
2021 };
2022
2023 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2024 {
2025 u32 gb_addr_config;
2026 int err;
2027
2028 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2029 case IP_VERSION(9, 0, 1):
2030 adev->gfx.config.max_hw_contexts = 8;
2031 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2032 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2033 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2034 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2035 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2036 break;
2037 case IP_VERSION(9, 2, 1):
2038 adev->gfx.config.max_hw_contexts = 8;
2039 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2040 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2041 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2042 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2043 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2044 drm_info(adev_to_drm(adev), "fix gfx.config for vega12\n");
2045 break;
2046 case IP_VERSION(9, 4, 0):
2047 adev->gfx.ras = &gfx_v9_0_ras;
2048 adev->gfx.config.max_hw_contexts = 8;
2049 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2050 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2051 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2052 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2053 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2054 gb_addr_config &= ~0xf3e777ff;
2055 gb_addr_config |= 0x22014042;
2056 /* check vbios table if gpu info is not available */
2057 err = amdgpu_atomfirmware_get_gfx_info(adev);
2058 if (err)
2059 return err;
2060 break;
2061 case IP_VERSION(9, 2, 2):
2062 case IP_VERSION(9, 1, 0):
2063 adev->gfx.config.max_hw_contexts = 8;
2064 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2065 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2066 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2067 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2068 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2069 gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2070 else
2071 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2072 break;
2073 case IP_VERSION(9, 4, 1):
2074 adev->gfx.ras = &gfx_v9_4_ras;
2075 adev->gfx.config.max_hw_contexts = 8;
2076 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2077 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2078 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2079 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2080 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2081 gb_addr_config &= ~0xf3e777ff;
2082 gb_addr_config |= 0x22014042;
2083 break;
2084 case IP_VERSION(9, 3, 0):
2085 adev->gfx.config.max_hw_contexts = 8;
2086 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2087 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2088 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2089 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2090 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2091 gb_addr_config &= ~0xf3e777ff;
2092 gb_addr_config |= 0x22010042;
2093 break;
2094 case IP_VERSION(9, 4, 2):
2095 adev->gfx.ras = &gfx_v9_4_2_ras;
2096 adev->gfx.config.max_hw_contexts = 8;
2097 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2098 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2099 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2100 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2101 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2102 gb_addr_config &= ~0xf3e777ff;
2103 gb_addr_config |= 0x22014042;
2104 /* check vbios table if gpu info is not available */
2105 err = amdgpu_atomfirmware_get_gfx_info(adev);
2106 if (err)
2107 return err;
2108 break;
2109 default:
2110 BUG();
2111 break;
2112 }
2113
2114 adev->gfx.config.gb_addr_config = gb_addr_config;
2115
2116 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2117 REG_GET_FIELD(
2118 adev->gfx.config.gb_addr_config,
2119 GB_ADDR_CONFIG,
2120 NUM_PIPES);
2121
2122 adev->gfx.config.max_tile_pipes =
2123 adev->gfx.config.gb_addr_config_fields.num_pipes;
2124
2125 adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2126 REG_GET_FIELD(
2127 adev->gfx.config.gb_addr_config,
2128 GB_ADDR_CONFIG,
2129 NUM_BANKS);
2130 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2131 REG_GET_FIELD(
2132 adev->gfx.config.gb_addr_config,
2133 GB_ADDR_CONFIG,
2134 MAX_COMPRESSED_FRAGS);
2135 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2136 REG_GET_FIELD(
2137 adev->gfx.config.gb_addr_config,
2138 GB_ADDR_CONFIG,
2139 NUM_RB_PER_SE);
2140 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2141 REG_GET_FIELD(
2142 adev->gfx.config.gb_addr_config,
2143 GB_ADDR_CONFIG,
2144 NUM_SHADER_ENGINES);
2145 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2146 REG_GET_FIELD(
2147 adev->gfx.config.gb_addr_config,
2148 GB_ADDR_CONFIG,
2149 PIPE_INTERLEAVE_SIZE));
2150
2151 return 0;
2152 }
2153
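/*
 * Initialize one compute ring: map it onto its MEC/pipe/queue, point
 * it at its HPD EOP slot and hook up the matching EOP interrupt.
 */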
2154 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2155 int mec, int pipe, int queue)
2156 {
2157 unsigned irq_type;
2158 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2159 unsigned int hw_prio;
2160
2162
2163 /* mec0 is me1 */
2164 ring->me = mec + 1;
2165 ring->pipe = pipe;
2166 ring->queue = queue;
2167
2168 ring->ring_obj = NULL;
2169 ring->use_doorbell = true;
2170 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2171 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2172 + (ring_id * GFX9_MEC_HPD_SIZE);
2173 ring->vm_hub = AMDGPU_GFXHUB(0);
2174 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2175
2176 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2177 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2178 + ring->pipe;
2179 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2180 AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
2181 /* type-2 packets are deprecated on MEC, use type-3 instead */
2182 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2183 hw_prio, NULL);
2184 }
2185
2186 static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
2187 {
2188 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
2189 uint32_t *ptr;
2190 uint32_t inst;
2191
2192 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
2193 if (!ptr) {
2194 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
2195 adev->gfx.ip_dump_core = NULL;
2196 } else {
2197 adev->gfx.ip_dump_core = ptr;
2198 }
2199
2200 /* Allocate memory for compute queue registers for all the instances */
2201 reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
2202 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
2203 adev->gfx.mec.num_queue_per_pipe;
2204
2205 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
2206 if (!ptr) {
2207 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
2208 adev->gfx.ip_dump_compute_queues = NULL;
2209 } else {
2210 adev->gfx.ip_dump_compute_queues = ptr;
2211 }
2212 }
2213
2214 static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
2215 {
2216 int i, j, k, r, ring_id;
2217 int xcc_id = 0;
2218 struct amdgpu_ring *ring;
2219 struct amdgpu_device *adev = ip_block->adev;
2220 unsigned int hw_prio;
2221
2222 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2223 case IP_VERSION(9, 0, 1):
2224 case IP_VERSION(9, 2, 1):
2225 case IP_VERSION(9, 4, 0):
2226 case IP_VERSION(9, 2, 2):
2227 case IP_VERSION(9, 1, 0):
2228 case IP_VERSION(9, 4, 1):
2229 case IP_VERSION(9, 3, 0):
2230 case IP_VERSION(9, 4, 2):
2231 adev->gfx.mec.num_mec = 2;
2232 break;
2233 default:
2234 adev->gfx.mec.num_mec = 1;
2235 break;
2236 }
2237
2238 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2239 case IP_VERSION(9, 0, 1):
2240 case IP_VERSION(9, 2, 1):
2241 case IP_VERSION(9, 4, 0):
2242 case IP_VERSION(9, 2, 2):
2243 case IP_VERSION(9, 1, 0):
2244 case IP_VERSION(9, 3, 0):
2245 adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2246 adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2247 if (adev->gfx.me_fw_version >= 167 &&
2248 adev->gfx.pfp_fw_version >= 196 &&
2249 adev->gfx.mec_fw_version >= 474) {
2250 adev->gfx.enable_cleaner_shader = true;
2251 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2252 if (r) {
2253 adev->gfx.enable_cleaner_shader = false;
2254 dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2255 }
2256 }
2257 break;
2258 case IP_VERSION(9, 4, 2):
2259 adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
2260 adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
2261 if (adev->gfx.mec_fw_version >= 88) {
2262 adev->gfx.enable_cleaner_shader = true;
2263 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
2264 if (r) {
2265 adev->gfx.enable_cleaner_shader = false;
2266 dev_err(adev->dev, "Failed to initialize cleaner shader\n");
2267 }
2268 }
2269 break;
2270 default:
2271 adev->gfx.enable_cleaner_shader = false;
2272 break;
2273 }
2274
2275 adev->gfx.mec.num_pipe_per_mec = 4;
2276 adev->gfx.mec.num_queue_per_pipe = 8;
2277
2278 /* EOP Event */
2279 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2280 if (r)
2281 return r;
2282
2283 /* Bad opcode Event */
2284 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
2285 GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
2286 &adev->gfx.bad_op_irq);
2287 if (r)
2288 return r;
2289
2290 /* Privileged reg */
2291 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2292 &adev->gfx.priv_reg_irq);
2293 if (r)
2294 return r;
2295
2296 /* Privileged inst */
2297 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2298 &adev->gfx.priv_inst_irq);
2299 if (r)
2300 return r;
2301
2302 /* ECC error */
2303 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2304 &adev->gfx.cp_ecc_error_irq);
2305 if (r)
2306 return r;
2307
2308 /* FUE error */
2309 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2310 &adev->gfx.cp_ecc_error_irq);
2311 if (r)
2312 return r;
2313
2314 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2315
2316 if (adev->gfx.rlc.funcs) {
2317 if (adev->gfx.rlc.funcs->init) {
2318 r = adev->gfx.rlc.funcs->init(adev);
2319 if (r) {
2320 dev_err(adev->dev, "Failed to init rlc BOs!\n");
2321 return r;
2322 }
2323 }
2324 }
2325
2326 r = gfx_v9_0_mec_init(adev);
2327 if (r) {
2328 DRM_ERROR("Failed to init MEC BOs!\n");
2329 return r;
2330 }
2331
2332 /* set up the gfx ring */
2333 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2334 ring = &adev->gfx.gfx_ring[i];
2335 ring->ring_obj = NULL;
2336 if (!i)
2337 sprintf(ring->name, "gfx");
2338 else
2339 sprintf(ring->name, "gfx_%d", i);
2340 ring->use_doorbell = true;
2341 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2342
2343 /* disable scheduler on the real ring */
2344 ring->no_scheduler = adev->gfx.mcbp;
2345 ring->vm_hub = AMDGPU_GFXHUB(0);
2346 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2347 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2348 AMDGPU_RING_PRIO_DEFAULT, NULL);
2349 if (r)
2350 return r;
2351 }
2352
2353 /* set up the software rings */
2354 if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2355 for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2356 ring = &adev->gfx.sw_gfx_ring[i];
2357 ring->ring_obj = NULL;
2358 strscpy(ring->name, amdgpu_sw_ring_name(i), sizeof(ring->name));
2359 ring->use_doorbell = true;
2360 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2361 ring->is_sw_ring = true;
2362 hw_prio = amdgpu_sw_ring_priority(i);
2363 ring->vm_hub = AMDGPU_GFXHUB(0);
2364 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2365 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
2366 NULL);
2367 if (r)
2368 return r;
2369 ring->wptr = 0;
2370 }
2371
2372 /* init the muxer and add software rings */
2373 r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
2374 GFX9_NUM_SW_GFX_RINGS);
2375 if (r) {
2376 DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
2377 return r;
2378 }
2379 for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
2380 r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
2381 &adev->gfx.sw_gfx_ring[i]);
2382 if (r) {
2383 DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
2384 return r;
2385 }
2386 }
2387 }
2388
2389 /* set up the compute queues - allocate horizontally across pipes */
2390 ring_id = 0;
2391 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2392 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2393 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2394 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2395 k, j))
2396 continue;
2397
2398 r = gfx_v9_0_compute_ring_init(adev,
2399 ring_id,
2400 i, k, j);
2401 if (r)
2402 return r;
2403
2404 ring_id++;
2405 }
2406 }
2407 }
2408
2409 /* TODO: Add queue reset mask when FW fully supports it */
2410 adev->gfx.gfx_supported_reset =
2411 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
2412 adev->gfx.compute_supported_reset =
2413 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
2414 if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset) {
2415 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2416 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2417 }
2418
2419 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
2420 if (r) {
2421 DRM_ERROR("Failed to init KIQ BOs!\n");
2422 return r;
2423 }
2424
2425 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2426 if (r)
2427 return r;
2428
2429 /* create MQD for all compute queues as well as KIQ for SRIOV case */
2430 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
2431 if (r)
2432 return r;
2433
2434 adev->gfx.ce_ram_size = 0x8000;
2435
2436 r = gfx_v9_0_gpu_early_init(adev);
2437 if (r)
2438 return r;
2439
2440 if (amdgpu_gfx_ras_sw_init(adev)) {
2441 dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
2442 return -EINVAL;
2443 }
2444
2445 gfx_v9_0_alloc_ip_dump(adev);
2446
2447 r = amdgpu_gfx_sysfs_init(adev);
2448 if (r)
2449 return r;
2450
2451 return 0;
2452 }
2453
2454
2455 static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2456 {
2457 int i;
2458 struct amdgpu_device *adev = ip_block->adev;
2459
2460 if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
2461 for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
2462 amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
2463 amdgpu_ring_mux_fini(&adev->gfx.muxer);
2464 }
2465
2466 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2467 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2468 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2469 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2470
2471 amdgpu_gfx_mqd_sw_fini(adev, 0);
2472 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2473 amdgpu_gfx_kiq_fini(adev, 0);
2474
2475 amdgpu_gfx_cleaner_shader_sw_fini(adev);
2476
2477 gfx_v9_0_mec_fini(adev);
2478 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2479 &adev->gfx.rlc.clear_state_gpu_addr,
2480 (void **)&adev->gfx.rlc.cs_ptr);
2481 if (adev->flags & AMD_IS_APU) {
2482 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2483 &adev->gfx.rlc.cp_table_gpu_addr,
2484 (void **)&adev->gfx.rlc.cp_table_ptr);
2485 }
2486 gfx_v9_0_free_microcode(adev);
2487
2488 amdgpu_gfx_sysfs_fini(adev);
2489
2490 kfree(adev->gfx.ip_dump_core);
2491 kfree(adev->gfx.ip_dump_compute_queues);
2492
2493 return 0;
2494 }
2495
2496
2497 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2498 {
2499 /* TODO */
2500 }
2501
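/*
 * Steer subsequent register accesses to a specific SE/SH/instance via
 * GRBM_GFX_INDEX; 0xffffffff selects broadcast for that field.
 */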
2502 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2503 u32 instance, int xcc_id)
2504 {
2505 u32 data;
2506
2507 if (instance == 0xffffffff)
2508 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2509 else
2510 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2511
2512 if (se_num == 0xffffffff)
2513 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2514 else
2515 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2516
2517 if (sh_num == 0xffffffff)
2518 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2519 else
2520 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2521
2522 WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2523 }
2524
2525 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2526 {
2527 u32 data, mask;
2528
2529 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2530 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2531
2532 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2533 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2534
2535 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2536 adev->gfx.config.max_sh_per_se);
2537
2538 return (~data) & mask;
2539 }
2540
2541 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2542 {
2543 int i, j;
2544 u32 data;
2545 u32 active_rbs = 0;
2546 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2547 adev->gfx.config.max_sh_per_se;
2548
2549 mutex_lock(&adev->grbm_idx_mutex);
2550 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2551 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2552 amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2553 data = gfx_v9_0_get_rb_active_bitmap(adev);
2554 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2555 rb_bitmap_width_per_sh);
2556 }
2557 }
2558 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2559 mutex_unlock(&adev->grbm_idx_mutex);
2560
2561 adev->gfx.config.backend_enable_mask = active_rbs;
2562 adev->gfx.config.num_rbs = hweight32(active_rbs);
2563 }
2564
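/*
 * Enable the SPI debug trap for the given VMID range and clear the
 * trap mask and data registers.
 */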
2565 static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
2566 uint32_t first_vmid,
2567 uint32_t last_vmid)
2568 {
2569 uint32_t data;
2570 uint32_t trap_config_vmid_mask = 0;
2571 int i;
2572
2573 /* Calculate trap config vmid mask */
2574 for (i = first_vmid; i < last_vmid; i++)
2575 trap_config_vmid_mask |= (1 << i);
2576
2577 data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
2578 VMID_SEL, trap_config_vmid_mask);
2579 data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
2580 TRAP_EN, 1);
2581 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
2582 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
2583
2584 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
2585 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
2586 }
2587
2588 #define DEFAULT_SH_MEM_BASES (0x6000)
2589 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2590 {
2591 int i;
2592 uint32_t sh_mem_config;
2593 uint32_t sh_mem_bases;
2594
2595 /*
2596 * Configure apertures:
2597 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
2598 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
2599 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
2600 */
2601 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2602
2603 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2604 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2605 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2606
2607 mutex_lock(&adev->srbm_mutex);
2608 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2609 soc15_grbm_select(adev, 0, 0, 0, i, 0);
2610 /* CP and shaders */
2611 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2612 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2613 }
2614 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2615 mutex_unlock(&adev->srbm_mutex);
2616
2617 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2618 * access. These should be enabled by FW for target VMIDs. */
2619 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2620 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2621 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2622 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2623 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2624 }
2625 }
2626
2627 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2628 {
2629 int vmid;
2630
2631 /*
2632 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2633 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2634 * the driver can enable them for graphics. VMID0 should maintain
2635 * access so that HWS firmware can save/restore entries.
2636 */
2637 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2638 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2639 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2640 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2641 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2642 }
2643 }
2644
2645 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2646 {
2647 uint32_t tmp;
2648
2649 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2650 case IP_VERSION(9, 4, 1):
2651 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2652 tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
2653 !READ_ONCE(adev->barrier_has_auto_waitcnt));
2654 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2655 break;
2656 case IP_VERSION(9, 4, 2):
2657 gfx_v9_4_2_init_sq(adev);
2658 break;
2659 default:
2660 break;
2661 }
2662 }
2663
2664 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2665 {
2666 u32 tmp;
2667 int i;
2668
2669 if (!amdgpu_sriov_vf(adev) ||
2670 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
2671 WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2672 }
2673
2674 gfx_v9_0_tiling_mode_table_init(adev);
2675
2676 if (adev->gfx.num_gfx_rings)
2677 gfx_v9_0_setup_rb(adev);
2678 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2679 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2680
2681 /* XXX SH_MEM regs */
2682 /* where to put LDS, scratch, GPUVM in FSA64 space */
2683 mutex_lock(&adev->srbm_mutex);
2684 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2685 soc15_grbm_select(adev, 0, 0, 0, i, 0);
2686 /* CP and shaders */
2687 if (i == 0) {
2688 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2689 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2690 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2691 !!adev->gmc.noretry);
2692 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2693 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2694 } else {
2695 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2696 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2697 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2698 !!adev->gmc.noretry);
2699 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2700 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2701 (adev->gmc.private_aperture_start >> 48));
2702 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2703 (adev->gmc.shared_aperture_start >> 48));
2704 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2705 }
2706 }
2707 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
2708
2709 mutex_unlock(&adev->srbm_mutex);
2710
2711 gfx_v9_0_init_compute_vmid(adev);
2712 gfx_v9_0_init_gds_vmid(adev);
2713 gfx_v9_0_init_sq_config(adev);
2714 }
2715
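/*
 * Wait for the RLC serdes CU masters on every SE/SH and then the
 * non-CU masters to go idle, logging a message on timeout.
 */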
2716 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2717 {
2718 u32 i, j, k;
2719 u32 mask;
2720
2721 mutex_lock(&adev->grbm_idx_mutex);
2722 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2723 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2724 amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
2725 for (k = 0; k < adev->usec_timeout; k++) {
2726 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2727 break;
2728 udelay(1);
2729 }
2730 if (k == adev->usec_timeout) {
2731 amdgpu_gfx_select_se_sh(adev, 0xffffffff,
2732 0xffffffff, 0xffffffff, 0);
2733 mutex_unlock(&adev->grbm_idx_mutex);
2734 drm_info(adev_to_drm(adev), "Timed out waiting for RLC serdes %u,%u\n",
2735 i, j);
2736 return;
2737 }
2738 }
2739 }
2740 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
2741 mutex_unlock(&adev->grbm_idx_mutex);
2742
2743 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2744 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2745 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2746 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2747 for (k = 0; k < adev->usec_timeout; k++) {
2748 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2749 break;
2750 udelay(1);
2751 }
2752 }
2753
2754 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2755 bool enable)
2756 {
2757 u32 tmp;
2758
2759 /* These interrupts should be enabled to drive DS clock */
2760
2761 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2762
2763 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2764 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2765 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2766 if (adev->gfx.num_gfx_rings)
2767 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2768
2769 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2770 }
2771
2772 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2773 {
2774 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2775 /* csib */
2776 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2777 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2778 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2779 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2780 WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2781 adev->gfx.rlc.clear_state_size);
2782 }
2783
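/*
 * Walk the RLC register_list_format blob starting at @indirect_offset.
 * Each indirect block is terminated by a 0xFFFFFFFF marker; record where
 * every block starts and collect the unique indirect register offsets
 * found in each third word, filling free slots in @unique_indirect_regs.
 */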
2784 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2785 int indirect_offset,
2786 int list_size,
2787 int *unique_indirect_regs,
2788 int unique_indirect_reg_count,
2789 int *indirect_start_offsets,
2790 int *indirect_start_offsets_count,
2791 int max_start_offsets_count)
2792 {
2793 int idx;
2794
2795 for (; indirect_offset < list_size; indirect_offset++) {
2796 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2797 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2798 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2799
2800 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2801 indirect_offset += 2;
2802
2803 			/* look for the matching index */
2804 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2805 if (unique_indirect_regs[idx] ==
2806 register_list_format[indirect_offset] ||
2807 !unique_indirect_regs[idx])
2808 break;
2809 }
2810
2811 BUG_ON(idx >= unique_indirect_reg_count);
2812
2813 if (!unique_indirect_regs[idx])
2814 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2815
2816 indirect_offset++;
2817 }
2818 }
2819 }
2820
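/*
 * Program the RLC save/restore machine: upload the direct register-restore
 * table into SRM ARAM, rewrite the indirect part of the format list so each
 * indirect register is referenced by its index into the unique list, and
 * program the SRM index/data control registers for those unique registers.
 */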
2821 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2822 {
2823 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2824 int unique_indirect_reg_count = 0;
2825
2826 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2827 int indirect_start_offsets_count = 0;
2828
2829 int list_size = 0;
2830 int i = 0, j = 0;
2831 u32 tmp = 0;
2832
2833 u32 *register_list_format =
2834 kmemdup(adev->gfx.rlc.register_list_format,
2835 adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2836 if (!register_list_format)
2837 return -ENOMEM;
2838
2839 /* setup unique_indirect_regs array and indirect_start_offsets array */
2840 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2841 gfx_v9_1_parse_ind_reg_list(register_list_format,
2842 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2843 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2844 unique_indirect_regs,
2845 unique_indirect_reg_count,
2846 indirect_start_offsets,
2847 &indirect_start_offsets_count,
2848 ARRAY_SIZE(indirect_start_offsets));
2849
2850 /* enable auto inc in case it is disabled */
2851 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2852 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2853 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2854
2855 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2856 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2857 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2858 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2859 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2860 adev->gfx.rlc.register_restore[i]);
2861
2862 /* load indirect register */
2863 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2864 adev->gfx.rlc.reg_list_format_start);
2865
2866 /* direct register portion */
2867 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2868 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2869 register_list_format[i]);
2870
2871 /* indirect register portion */
2872 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2873 if (register_list_format[i] == 0xFFFFFFFF) {
2874 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2875 continue;
2876 }
2877
2878 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2879 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2880
2881 for (j = 0; j < unique_indirect_reg_count; j++) {
2882 if (register_list_format[i] == unique_indirect_regs[j]) {
2883 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2884 break;
2885 }
2886 }
2887
2888 BUG_ON(j >= unique_indirect_reg_count);
2889
2890 i++;
2891 }
2892
2893 /* set save/restore list size */
2894 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2895 list_size = list_size >> 1;
2896 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2897 adev->gfx.rlc.reg_restore_list_size);
2898 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2899
2900 /* write the starting offsets to RLC scratch ram */
2901 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2902 adev->gfx.rlc.starting_offsets_start);
2903 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2904 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2905 indirect_start_offsets[i]);
2906
2907 	/* load unique indirect regs */
2908 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2909 if (unique_indirect_regs[i] != 0) {
2910 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2911 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2912 unique_indirect_regs[i] & 0x3FFFF);
2913
2914 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2915 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2916 unique_indirect_regs[i] >> 20);
2917 }
2918 }
2919
2920 kfree(register_list_format);
2921 return 0;
2922 }
2923
2924 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2925 {
2926 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2927 }
2928
2929 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2930 bool enable)
2931 {
2932 uint32_t data = 0;
2933 uint32_t default_data = 0;
2934
2935 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2936 if (enable) {
2937 /* enable GFXIP control over CGPG */
2938 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2939 		if (default_data != data)
2940 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2941
2942 /* update status */
2943 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2944 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2945 		if (default_data != data)
2946 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2947 } else {
2948 		/* restore GFXIP control over CGPG */
2949 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2950 		if (default_data != data)
2951 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2952 }
2953 }
2954
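/*
 * One-time gfx power-gating setup: idle poll count, RLC power-up/down and
 * serdes command delays, the CGCG-active window and the GRBM register-save
 * idle threshold. GFXIP control over CGPG is then enabled on everything
 * except GC 9.3.0.
 */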
2955 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2956 {
2957 uint32_t data = 0;
2958
2959 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2960 AMD_PG_SUPPORT_GFX_SMG |
2961 AMD_PG_SUPPORT_GFX_DMG)) {
2962 /* init IDLE_POLL_COUNT = 60 */
2963 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2964 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2965 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2966 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2967
2968 /* init RLC PG Delay */
2969 data = 0;
2970 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2971 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2972 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2973 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2974 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2975
2976 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2977 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2978 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2979 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2980
2981 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2982 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2983 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2984 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2985
2986 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2987 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2988
2989 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2990 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2991 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2992 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))
2993 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2994 }
2995 }
2996
2997 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2998 bool enable)
2999 {
3000 uint32_t data = 0;
3001 uint32_t default_data = 0;
3002
3003 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3004 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3005 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
3006 enable ? 1 : 0);
3007 if (default_data != data)
3008 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3009 }
3010
3011 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
3012 bool enable)
3013 {
3014 uint32_t data = 0;
3015 uint32_t default_data = 0;
3016
3017 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3018 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3019 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
3020 enable ? 1 : 0);
3021 	if (default_data != data)
3022 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3023 }
3024
3025 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
3026 bool enable)
3027 {
3028 uint32_t data = 0;
3029 uint32_t default_data = 0;
3030
3031 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3032 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3033 CP_PG_DISABLE,
3034 enable ? 0 : 1);
3035 	if (default_data != data)
3036 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3037 }
3038
3039 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
3040 bool enable)
3041 {
3042 uint32_t data, default_data;
3043
3044 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3045 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3046 GFX_POWER_GATING_ENABLE,
3047 enable ? 1 : 0);
3048 	if (default_data != data)
3049 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3050 }
3051
3052 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
3053 bool enable)
3054 {
3055 uint32_t data, default_data;
3056
3057 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3058 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3059 GFX_PIPELINE_PG_ENABLE,
3060 enable ? 1 : 0);
3061 	if (default_data != data)
3062 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3063
3064 if (!enable)
3065 /* read any GFX register to wake up GFX */
3066 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
3067 }
3068
3069 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
3070 bool enable)
3071 {
3072 uint32_t data, default_data;
3073
3074 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3075 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3076 STATIC_PER_CU_PG_ENABLE,
3077 enable ? 1 : 0);
3078 	if (default_data != data)
3079 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3080 }
3081
3082 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3083 bool enable)
3084 {
3085 uint32_t data, default_data;
3086
3087 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3088 data = REG_SET_FIELD(data, RLC_PG_CNTL,
3089 DYN_PER_CU_PG_ENABLE,
3090 enable ? 1 : 0);
3091 	if (default_data != data)
3092 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3093 }
3094
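/*
 * Power-gating init: program the clear-state buffer, set up the RLC
 * save/restore machine (v2_1 firmware only) and, if any PG feature is
 * enabled, the RLC jump table and the gfx power-gating registers.
 */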
3095 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3096 {
3097 gfx_v9_0_init_csb(adev);
3098
3099 /*
3100 	 * The RLC save/restore list is only available since RLC v2_1,
3101 	 * and it is required by the gfxoff feature.
3102 */
3103 if (adev->gfx.rlc.is_rlc_v2_1) {
3104 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
3105 IP_VERSION(9, 2, 1) ||
3106 (adev->apu_flags & AMD_APU_IS_RAVEN2))
3107 gfx_v9_1_init_rlc_save_restore_list(adev);
3108 gfx_v9_0_enable_save_restore_machine(adev);
3109 }
3110
3111 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3112 AMD_PG_SUPPORT_GFX_SMG |
3113 AMD_PG_SUPPORT_GFX_DMG |
3114 AMD_PG_SUPPORT_CP |
3115 AMD_PG_SUPPORT_GDS |
3116 AMD_PG_SUPPORT_RLC_SMU_HS)) {
3117 WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
3118 adev->gfx.rlc.cp_table_gpu_addr >> 8);
3119 gfx_v9_0_init_gfx_power_gating(adev);
3120 }
3121 }
3122
3123 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3124 {
3125 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3126 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3127 gfx_v9_0_wait_for_rlc_serdes(adev);
3128 }
3129
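/* Pulse the RLC soft-reset bit in GRBM_SOFT_RESET, holding each edge for 50us. */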
3130 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3131 {
3132 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3133 udelay(50);
3134 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3135 udelay(50);
3136 }
3137
3138 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3139 {
3140 #ifdef AMDGPU_RLC_DEBUG_RETRY
3141 u32 rlc_ucode_ver;
3142 #endif
3143
3144 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3145 udelay(50);
3146
3147 	/* on APUs such as Carrizo, enable the CP interrupt only after the CP is initialized */
3148 if (!(adev->flags & AMD_IS_APU)) {
3149 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3150 udelay(50);
3151 }
3152
3153 #ifdef AMDGPU_RLC_DEBUG_RETRY
3154 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3155 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3156 	if (rlc_ucode_ver == 0x108) {
3157 		drm_info(adev_to_drm(adev), "Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3158 			 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3159 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3160 * default is 0x9C4 to create a 100us interval */
3161 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3162 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3163 * to disable the page fault retry interrupts, default is
3164 * 0x100 (256) */
3165 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3166 }
3167 #endif
3168 }
3169
3170 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3171 {
3172 const struct rlc_firmware_header_v2_0 *hdr;
3173 const __le32 *fw_data;
3174 unsigned i, fw_size;
3175
3176 if (!adev->gfx.rlc_fw)
3177 return -EINVAL;
3178
3179 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3180 amdgpu_ucode_print_rlc_hdr(&hdr->header);
3181
3182 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3183 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3184 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3185
3186 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3187 RLCG_UCODE_LOADING_START_ADDRESS);
3188 for (i = 0; i < fw_size; i++)
3189 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3190 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3191
3192 return 0;
3193 }
3194
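/*
 * Bring the RLC up. Under SRIOV only the CSB needs reprogramming. On bare
 * metal: stop the RLC, disable CGCG/CGLS, run the PG init, load the RLC
 * microcode when PSP loading is not used, configure LBPW on Raven-family
 * (9.1.0/9.2.2) and Vega20 (9.4.0) parts, then start the RLC.
 */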
3195 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3196 {
3197 int r;
3198
3199 if (amdgpu_sriov_vf(adev)) {
3200 gfx_v9_0_init_csb(adev);
3201 return 0;
3202 }
3203
3204 adev->gfx.rlc.funcs->stop(adev);
3205
3206 /* disable CG */
3207 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3208
3209 gfx_v9_0_init_pg(adev);
3210
3211 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3212 /* legacy rlc firmware loading */
3213 r = gfx_v9_0_rlc_load_microcode(adev);
3214 if (r)
3215 return r;
3216 }
3217
3218 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
3219 case IP_VERSION(9, 2, 2):
3220 case IP_VERSION(9, 1, 0):
3221 gfx_v9_0_init_lbpw(adev);
3222 if (amdgpu_lbpw == 0)
3223 gfx_v9_0_enable_lbpw(adev, false);
3224 else
3225 gfx_v9_0_enable_lbpw(adev, true);
3226 break;
3227 case IP_VERSION(9, 4, 0):
3228 gfx_v9_4_init_lbpw(adev);
3229 if (amdgpu_lbpw > 0)
3230 gfx_v9_0_enable_lbpw(adev, true);
3231 else
3232 gfx_v9_0_enable_lbpw(adev, false);
3233 break;
3234 default:
3235 break;
3236 }
3237
3238 gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
3239
3240 adev->gfx.rlc.funcs->start(adev);
3241
3242 return 0;
3243 }
3244
3245 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3246 {
3247 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3248
3249 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
3250 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
3251 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
3252 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
3253 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
3254 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
3255 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
3256 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
3257 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
3258 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3259 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3260 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3261 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3262 udelay(50);
3263 }
3264
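/*
 * Legacy (non-PSP) front-end microcode load: halt the gfx CP, then stream
 * the PFP, CE and ME images into their ucode RAMs one dword at a time and
 * leave the firmware version in the address register.
 */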
3265 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3266 {
3267 const struct gfx_firmware_header_v1_0 *pfp_hdr;
3268 const struct gfx_firmware_header_v1_0 *ce_hdr;
3269 const struct gfx_firmware_header_v1_0 *me_hdr;
3270 const __le32 *fw_data;
3271 unsigned i, fw_size;
3272
3273 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3274 return -EINVAL;
3275
3276 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3277 adev->gfx.pfp_fw->data;
3278 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3279 adev->gfx.ce_fw->data;
3280 me_hdr = (const struct gfx_firmware_header_v1_0 *)
3281 adev->gfx.me_fw->data;
3282
3283 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3284 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3285 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3286
3287 gfx_v9_0_cp_gfx_enable(adev, false);
3288
3289 /* PFP */
3290 fw_data = (const __le32 *)
3291 (adev->gfx.pfp_fw->data +
3292 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3293 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3294 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3295 for (i = 0; i < fw_size; i++)
3296 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3297 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3298
3299 /* CE */
3300 fw_data = (const __le32 *)
3301 (adev->gfx.ce_fw->data +
3302 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3303 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3304 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3305 for (i = 0; i < fw_size; i++)
3306 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3307 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3308
3309 /* ME */
3310 fw_data = (const __le32 *)
3311 (adev->gfx.me_fw->data +
3312 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3313 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3314 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3315 for (i = 0; i < fw_size; i++)
3316 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3317 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3318
3319 return 0;
3320 }
3321
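/*
 * Start the gfx CP and submit the initial clear-state sequence as PM4
 * packets: context control, the gfx9 clear-state context registers, a
 * CLEAR_STATE packet and the CE partition bases. The resubmit is skipped
 * on gfx9 APUs resuming from S3 without firmware assistance.
 */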
3322 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3323 {
3324 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3325 const struct cs_section_def *sect = NULL;
3326 const struct cs_extent_def *ext = NULL;
3327 int r, i, tmp;
3328
3329 /* init the CP */
3330 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3331 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3332
3333 gfx_v9_0_cp_gfx_enable(adev, true);
3334
3335 	/* Limit this quirk to the gfx9 APU series; the gfx10/gfx11 APUs
3336 	 * have been confirmed not to need this update.
3337 */
3338 	if ((adev->flags & AMD_IS_APU) &&
3339 adev->in_s3 && !pm_resume_via_firmware()) {
3340 drm_info(adev_to_drm(adev), "Will skip the CSB packet resubmit\n");
3341 return 0;
3342 }
3343 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3344 if (r) {
3345 drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
3346 return r;
3347 }
3348
3349 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3350 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3351
3352 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3353 amdgpu_ring_write(ring, 0x80000000);
3354 amdgpu_ring_write(ring, 0x80000000);
3355
3356 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3357 for (ext = sect->section; ext->extent != NULL; ++ext) {
3358 if (sect->id == SECT_CONTEXT) {
3359 amdgpu_ring_write(ring,
3360 PACKET3(PACKET3_SET_CONTEXT_REG,
3361 ext->reg_count));
3362 amdgpu_ring_write(ring,
3363 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3364 for (i = 0; i < ext->reg_count; i++)
3365 amdgpu_ring_write(ring, ext->extent[i]);
3366 }
3367 }
3368 }
3369
3370 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3371 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3372
3373 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3374 amdgpu_ring_write(ring, 0);
3375
3376 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3377 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3378 amdgpu_ring_write(ring, 0x8000);
3379 amdgpu_ring_write(ring, 0x8000);
3380
3381 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3382 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3383 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3384 amdgpu_ring_write(ring, tmp);
3385 amdgpu_ring_write(ring, 0);
3386
3387 amdgpu_ring_commit(ring);
3388
3389 return 0;
3390 }
3391
3392 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3393 {
3394 struct amdgpu_ring *ring;
3395 u32 tmp;
3396 u32 rb_bufsz;
3397 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3398
3399 /* Set the write pointer delay */
3400 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3401
3402 /* set the RB to use vmid 0 */
3403 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3404
3405 /* Set ring buffer size */
3406 ring = &adev->gfx.gfx_ring[0];
3407 rb_bufsz = order_base_2(ring->ring_size / 8);
3408 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3409 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3410 #ifdef __BIG_ENDIAN
3411 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3412 #endif
3413 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3414
3415 /* Initialize the ring buffer's write pointers */
3416 ring->wptr = 0;
3417 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3418 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3419
3420 /* set the wb address whether it's enabled or not */
3421 rptr_addr = ring->rptr_gpu_addr;
3422 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3423 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3424
3425 wptr_gpu_addr = ring->wptr_gpu_addr;
3426 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3427 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3428
3429 mdelay(1);
3430 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3431
3432 rb_addr = ring->gpu_addr >> 8;
3433 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3434 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3435
3436 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3437 if (ring->use_doorbell) {
3438 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3439 DOORBELL_OFFSET, ring->doorbell_index);
3440 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3441 DOORBELL_EN, 1);
3442 } else {
3443 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3444 }
3445 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3446
3447 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3448 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3449 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3450
3451 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3452 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3453
3454
3455 /* start the ring */
3456 gfx_v9_0_cp_gfx_start(adev);
3457
3458 return 0;
3459 }
3460
3461 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3462 {
3463 if (enable) {
3464 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3465 } else {
3466 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3467 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
3468 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
3469 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
3470 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
3471 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
3472 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
3473 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
3474 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
3475 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3476 adev->gfx.kiq[0].ring.sched.ready = false;
3477 }
3478 udelay(50);
3479 }
3480
3481 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3482 {
3483 const struct gfx_firmware_header_v1_0 *mec_hdr;
3484 const __le32 *fw_data;
3485 unsigned i;
3486 u32 tmp;
3487
3488 if (!adev->gfx.mec_fw)
3489 return -EINVAL;
3490
3491 gfx_v9_0_cp_compute_enable(adev, false);
3492
3493 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3494 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3495
3496 fw_data = (const __le32 *)
3497 (adev->gfx.mec_fw->data +
3498 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3499 tmp = 0;
3500 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3501 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3502 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3503
3504 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3505 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3506 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3507 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3508
3509 /* MEC1 */
3510 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3511 mec_hdr->jt_offset);
3512 for (i = 0; i < mec_hdr->jt_size; i++)
3513 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3514 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3515
3516 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3517 adev->gfx.mec_fw_version);
3518 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode from MEC1. */
3519
3520 return 0;
3521 }
3522
3523 /* KIQ functions */
3524 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3525 {
3526 uint32_t tmp;
3527 struct amdgpu_device *adev = ring->adev;
3528
3529 /* tell RLC which is KIQ queue */
3530 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3531 tmp &= 0xffffff00;
3532 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3533 WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
3534 }
3535
3536 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3537 {
3538 struct amdgpu_device *adev = ring->adev;
3539
3540 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3541 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3542 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3543 mqd->cp_hqd_queue_priority =
3544 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3545 }
3546 }
3547 }
3548
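/*
 * Fill the v9 MQD for a compute or KIQ queue from the ring state: EOP
 * buffer, doorbell, MQD/HQD base addresses, queue size and the rptr/wptr
 * report addresses. Only the KIQ marks itself active here; regular queues
 * are activated later by the KIQ's map_queues packet.
 */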
3549 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3550 {
3551 struct amdgpu_device *adev = ring->adev;
3552 struct v9_mqd *mqd = ring->mqd_ptr;
3553 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3554 uint32_t tmp;
3555
3556 mqd->header = 0xC0310800;
3557 mqd->compute_pipelinestat_enable = 0x00000001;
3558 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3559 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3560 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3561 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3562 mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3563 mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3564 mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3565 mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3566 mqd->compute_misc_reserved = 0x00000003;
3567
3568 mqd->dynamic_cu_mask_addr_lo =
3569 lower_32_bits(ring->mqd_gpu_addr
3570 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3571 mqd->dynamic_cu_mask_addr_hi =
3572 upper_32_bits(ring->mqd_gpu_addr
3573 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3574
3575 eop_base_addr = ring->eop_gpu_addr >> 8;
3576 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3577 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3578
3579 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3580 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3581 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3582 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3583
3584 mqd->cp_hqd_eop_control = tmp;
3585
3586 /* enable doorbell? */
3587 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3588
3589 if (ring->use_doorbell) {
3590 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3591 DOORBELL_OFFSET, ring->doorbell_index);
3592 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3593 DOORBELL_EN, 1);
3594 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3595 DOORBELL_SOURCE, 0);
3596 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3597 DOORBELL_HIT, 0);
3598 } else {
3599 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3600 DOORBELL_EN, 0);
3601 }
3602
3603 mqd->cp_hqd_pq_doorbell_control = tmp;
3604
3605 /* disable the queue if it's active */
3606 ring->wptr = 0;
3607 mqd->cp_hqd_dequeue_request = 0;
3608 mqd->cp_hqd_pq_rptr = 0;
3609 mqd->cp_hqd_pq_wptr_lo = 0;
3610 mqd->cp_hqd_pq_wptr_hi = 0;
3611
3612 /* set the pointer to the MQD */
3613 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3614 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3615
3616 /* set MQD vmid to 0 */
3617 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3618 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3619 mqd->cp_mqd_control = tmp;
3620
3621 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3622 hqd_gpu_addr = ring->gpu_addr >> 8;
3623 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3624 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3625
3626 /* set up the HQD, this is similar to CP_RB0_CNTL */
3627 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3628 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3629 (order_base_2(ring->ring_size / 4) - 1));
3630 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3631 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3632 #ifdef __BIG_ENDIAN
3633 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3634 #endif
3635 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3636 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3637 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3638 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3639 mqd->cp_hqd_pq_control = tmp;
3640
3641 /* set the wb address whether it's enabled or not */
3642 wb_gpu_addr = ring->rptr_gpu_addr;
3643 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3644 mqd->cp_hqd_pq_rptr_report_addr_hi =
3645 upper_32_bits(wb_gpu_addr) & 0xffff;
3646
3647 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3648 wb_gpu_addr = ring->wptr_gpu_addr;
3649 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3650 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3651
3652 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3653 ring->wptr = 0;
3654 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3655
3656 /* set the vmid for the queue */
3657 mqd->cp_hqd_vmid = 0;
3658
3659 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3660 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3661 mqd->cp_hqd_persistent_state = tmp;
3662
3663 /* set MIN_IB_AVAIL_SIZE */
3664 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3665 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3666 mqd->cp_hqd_ib_control = tmp;
3667
3668 /* set static priority for a queue/ring */
3669 gfx_v9_0_mqd_set_priority(ring, mqd);
3670 mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3671
3672 	/* the map_queues packet doesn't need to activate the queue,
3673 	 * so only the KIQ needs to set this field.
3674 */
3675 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3676 mqd->cp_hqd_active = 1;
3677
3678 return 0;
3679 }
3680
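/*
 * Mirror the MQD into the KIQ's HQD registers, dequeueing any queue that
 * is still active first. Callers select the target queue through
 * soc15_grbm_select() while holding srbm_mutex.
 */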
3681 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3682 {
3683 struct amdgpu_device *adev = ring->adev;
3684 struct v9_mqd *mqd = ring->mqd_ptr;
3685 int j;
3686
3687 /* disable wptr polling */
3688 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3689
3690 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3691 mqd->cp_hqd_eop_base_addr_lo);
3692 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3693 mqd->cp_hqd_eop_base_addr_hi);
3694
3695 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3696 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3697 mqd->cp_hqd_eop_control);
3698
3699 /* enable doorbell? */
3700 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3701 mqd->cp_hqd_pq_doorbell_control);
3702
3703 /* disable the queue if it's active */
3704 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3705 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3706 for (j = 0; j < adev->usec_timeout; j++) {
3707 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3708 break;
3709 udelay(1);
3710 }
3711 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3712 mqd->cp_hqd_dequeue_request);
3713 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3714 mqd->cp_hqd_pq_rptr);
3715 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3716 mqd->cp_hqd_pq_wptr_lo);
3717 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3718 mqd->cp_hqd_pq_wptr_hi);
3719 }
3720
3721 /* set the pointer to the MQD */
3722 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3723 mqd->cp_mqd_base_addr_lo);
3724 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3725 mqd->cp_mqd_base_addr_hi);
3726
3727 /* set MQD vmid to 0 */
3728 WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3729 mqd->cp_mqd_control);
3730
3731 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3732 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3733 mqd->cp_hqd_pq_base_lo);
3734 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3735 mqd->cp_hqd_pq_base_hi);
3736
3737 /* set up the HQD, this is similar to CP_RB0_CNTL */
3738 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3739 mqd->cp_hqd_pq_control);
3740
3741 /* set the wb address whether it's enabled or not */
3742 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3743 mqd->cp_hqd_pq_rptr_report_addr_lo);
3744 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3745 mqd->cp_hqd_pq_rptr_report_addr_hi);
3746
3747 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3748 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3749 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3750 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3751 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3752
3753 /* enable the doorbell if requested */
3754 if (ring->use_doorbell) {
3755 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3756 (adev->doorbell_index.kiq * 2) << 2);
3757 		/* If the GC has entered CGPG, ringing a doorbell beyond the first
3758 		 * page doesn't wake it up. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
3759 		 * work around this issue; this change has to be kept in sync with
3760 		 * the firmware.
3761 */
3762 if (check_if_enlarge_doorbell_range(adev))
3763 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3764 (adev->doorbell.size - 4));
3765 else
3766 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3767 (adev->doorbell_index.userqueue_end * 2) << 2);
3768 }
3769
3770 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3771 mqd->cp_hqd_pq_doorbell_control);
3772
3773 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3774 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3775 mqd->cp_hqd_pq_wptr_lo);
3776 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3777 mqd->cp_hqd_pq_wptr_hi);
3778
3779 /* set the vmid for the queue */
3780 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3781
3782 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3783 mqd->cp_hqd_persistent_state);
3784
3785 /* activate the queue */
3786 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3787 mqd->cp_hqd_active);
3788
3789 if (ring->use_doorbell)
3790 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3791
3792 return 0;
3793 }
3794
3795 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3796 {
3797 struct amdgpu_device *adev = ring->adev;
3798 int j;
3799
3800 /* disable the queue if it's active */
3801 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3802
3803 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3804
3805 for (j = 0; j < adev->usec_timeout; j++) {
3806 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3807 break;
3808 udelay(1);
3809 }
3810
3811 		if (j == adev->usec_timeout) {
3812 DRM_DEBUG("KIQ dequeue request failed.\n");
3813
3814 /* Manual disable if dequeue request times out */
3815 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3816 }
3817
3818 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3819 0);
3820 }
3821
3822 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3823 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3824 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3825 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3826 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3827 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3828 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3829 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3830
3831 return 0;
3832 }
3833
3834 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3835 {
3836 struct amdgpu_device *adev = ring->adev;
3837 struct v9_mqd *mqd = ring->mqd_ptr;
3838 struct v9_mqd *tmp_mqd;
3839
3840 gfx_v9_0_kiq_setting(ring);
3841
3842 	/* The GPU could be in a bad state during probe, and the driver may
3843 	 * trigger a reset after loading the SMU. In that case the MQD has not
3844 	 * been initialized yet, so the driver needs to re-init it. Check
3845 	 * mqd->cp_hqd_pq_control, since this value should not be 0.
3846 */
3847 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
3848 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
3849 		/* for the GPU_RESET case, reset the MQD to a clean status */
3850 if (adev->gfx.kiq[0].mqd_backup)
3851 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
3852
3853 /* reset ring buffer */
3854 ring->wptr = 0;
3855 amdgpu_ring_clear_ring(ring);
3856
3857 mutex_lock(&adev->srbm_mutex);
3858 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3859 gfx_v9_0_kiq_init_register(ring);
3860 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3861 mutex_unlock(&adev->srbm_mutex);
3862 } else {
3863 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3864 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3865 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3866 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3867 amdgpu_ring_clear_ring(ring);
3868 mutex_lock(&adev->srbm_mutex);
3869 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3870 gfx_v9_0_mqd_init(ring);
3871 gfx_v9_0_kiq_init_register(ring);
3872 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3873 mutex_unlock(&adev->srbm_mutex);
3874
3875 if (adev->gfx.kiq[0].mqd_backup)
3876 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
3877 }
3878
3879 return 0;
3880 }
3881
3882 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
3883 {
3884 struct amdgpu_device *adev = ring->adev;
3885 struct v9_mqd *mqd = ring->mqd_ptr;
3886 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3887 struct v9_mqd *tmp_mqd;
3888
3889 	/* Same as the KIQ init above: the driver needs to re-init the MQD if
3890 	 * mqd->cp_hqd_pq_control indicates it has never been initialized.
3891 */
3892 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3893
3894 if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
3895 (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
3896 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3897 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3898 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3899 mutex_lock(&adev->srbm_mutex);
3900 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
3901 gfx_v9_0_mqd_init(ring);
3902 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
3903 mutex_unlock(&adev->srbm_mutex);
3904
3905 if (adev->gfx.mec.mqd_backup[mqd_idx])
3906 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3907 } else {
3908 /* restore MQD to a clean status */
3909 if (adev->gfx.mec.mqd_backup[mqd_idx])
3910 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3911 /* reset ring buffer */
3912 ring->wptr = 0;
3913 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3914 amdgpu_ring_clear_ring(ring);
3915 }
3916
3917 return 0;
3918 }
3919
3920 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3921 {
3922 gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
3923 return 0;
3924 }
3925
3926 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3927 {
3928 int i, r;
3929
3930 gfx_v9_0_cp_compute_enable(adev, true);
3931
3932 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3933 r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
3934 if (r)
3935 return r;
3936 }
3937
3938 return amdgpu_gfx_enable_kcq(adev, 0);
3939 }
3940
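/*
 * Full CP bring-up; the ordering matters: load microcode (legacy path
 * only), halt both CPs, resume the KIQ first so it can map the other
 * queues, then the gfx ring, then the KCQs, and finally run ring tests.
 */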
3941 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3942 {
3943 int r, i;
3944 struct amdgpu_ring *ring;
3945
3946 if (!(adev->flags & AMD_IS_APU))
3947 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3948
3949 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3950 if (adev->gfx.num_gfx_rings) {
3951 /* legacy firmware loading */
3952 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3953 if (r)
3954 return r;
3955 }
3956
3957 r = gfx_v9_0_cp_compute_load_microcode(adev);
3958 if (r)
3959 return r;
3960 }
3961
3962 if (adev->gfx.num_gfx_rings)
3963 gfx_v9_0_cp_gfx_enable(adev, false);
3964 gfx_v9_0_cp_compute_enable(adev, false);
3965
3966 r = gfx_v9_0_kiq_resume(adev);
3967 if (r)
3968 return r;
3969
3970 if (adev->gfx.num_gfx_rings) {
3971 r = gfx_v9_0_cp_gfx_resume(adev);
3972 if (r)
3973 return r;
3974 }
3975
3976 r = gfx_v9_0_kcq_resume(adev);
3977 if (r)
3978 return r;
3979
3980 if (adev->gfx.num_gfx_rings) {
3981 ring = &adev->gfx.gfx_ring[0];
3982 r = amdgpu_ring_test_helper(ring);
3983 if (r)
3984 return r;
3985 }
3986
3987 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3988 ring = &adev->gfx.compute_ring[i];
3989 amdgpu_ring_test_helper(ring);
3990 }
3991
3992 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3993
3994 return 0;
3995 }
3996
3997 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3998 {
3999 u32 tmp;
4000
4001 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&
4002 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
4003 return;
4004
4005 tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
4006 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
4007 adev->df.hash_status.hash_64k);
4008 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
4009 adev->df.hash_status.hash_2m);
4010 tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
4011 adev->df.hash_status.hash_1g);
4012 WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
4013 }
4014
4015 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
4016 {
4017 if (adev->gfx.num_gfx_rings)
4018 gfx_v9_0_cp_gfx_enable(adev, enable);
4019 gfx_v9_0_cp_compute_enable(adev, enable);
4020 }
4021
4022 static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
4023 {
4024 int r;
4025 struct amdgpu_device *adev = ip_block->adev;
4026
4027 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4028 adev->gfx.cleaner_shader_ptr);
4029
4030 if (!amdgpu_sriov_vf(adev))
4031 gfx_v9_0_init_golden_registers(adev);
4032
4033 gfx_v9_0_constants_init(adev);
4034
4035 gfx_v9_0_init_tcp_config(adev);
4036
4037 r = adev->gfx.rlc.funcs->resume(adev);
4038 if (r)
4039 return r;
4040
4041 r = gfx_v9_0_cp_resume(adev);
4042 if (r)
4043 return r;
4044
4045 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
4046 !amdgpu_sriov_vf(adev))
4047 gfx_v9_4_2_set_power_brake_sequence(adev);
4048
4049 return r;
4050 }
4051
4052 static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
4053 {
4054 struct amdgpu_device *adev = ip_block->adev;
4055
4056 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4057 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4058 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4059 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4060 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
4061
4062 	/* after a RAS fatal-error interrupt, DF is frozen and a kcq disable would fail */
4063 	if (!amdgpu_ras_intr_triggered())
4064 		/* disable the KCQs so the CPC stops touching memory that is no longer valid */
4065 amdgpu_gfx_disable_kcq(adev, 0);
4066
4067 if (amdgpu_sriov_vf(adev)) {
4068 gfx_v9_0_cp_gfx_enable(adev, false);
4069 		/* Polling must be disabled for SRIOV once the hw has finished;
4070 		 * otherwise the CPC engine may keep fetching a WB address that is
4071 		 * no longer valid after the sw side has finished, triggering a
4072 		 * DMAR read error on the hypervisor side.
4073 */
4074 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4075 return 0;
4076 }
4077
4078 	/* Use the deinitialization sequence from CAIL when unbinding the device
4079 	 * from the driver; otherwise the KIQ hangs when binding it back.
4080 */
4081 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4082 mutex_lock(&adev->srbm_mutex);
4083 soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
4084 adev->gfx.kiq[0].ring.pipe,
4085 adev->gfx.kiq[0].ring.queue, 0, 0);
4086 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
4087 soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4088 mutex_unlock(&adev->srbm_mutex);
4089 }
4090
4091 gfx_v9_0_cp_enable(adev, false);
4092
4093 /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
4094 if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
4095 (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {
4096 dev_dbg(adev->dev, "Skipping RLC halt\n");
4097 return 0;
4098 }
4099
4100 adev->gfx.rlc.funcs->stop(adev);
4101 return 0;
4102 }
4103
4104 static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
4105 {
4106 return gfx_v9_0_hw_fini(ip_block);
4107 }
4108
4109 static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
4110 {
4111 return gfx_v9_0_hw_init(ip_block);
4112 }
4113
4114 static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
4115 {
4116 struct amdgpu_device *adev = ip_block->adev;
4117
4118 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4119 GRBM_STATUS, GUI_ACTIVE))
4120 return false;
4121 else
4122 return true;
4123 }
4124
4125 static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4126 {
4127 unsigned i;
4128 struct amdgpu_device *adev = ip_block->adev;
4129
4130 for (i = 0; i < adev->usec_timeout; i++) {
4131 if (gfx_v9_0_is_idle(ip_block))
4132 return 0;
4133 udelay(1);
4134 }
4135 return -ETIMEDOUT;
4136 }
4137
4138 static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
4139 {
4140 u32 grbm_soft_reset = 0;
4141 u32 tmp;
4142 struct amdgpu_device *adev = ip_block->adev;
4143
4144 /* GRBM_STATUS */
4145 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4146 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4147 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4148 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4149 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4150 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4151 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4152 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4153 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4154 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4155 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4156 }
4157
4158 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4159 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4160 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4161 }
4162
4163 /* GRBM_STATUS2 */
4164 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4165 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4166 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4167 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4168
4169
4170 if (grbm_soft_reset) {
4171 /* stop the rlc */
4172 adev->gfx.rlc.funcs->stop(adev);
4173
4174 if (adev->gfx.num_gfx_rings)
4175 /* Disable GFX parsing/prefetching */
4176 gfx_v9_0_cp_gfx_enable(adev, false);
4177
4178 /* Disable MEC parsing/prefetching */
4179 gfx_v9_0_cp_compute_enable(adev, false);
4180
4181 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4182 tmp |= grbm_soft_reset;
4183 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4184 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4185 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4186
4187 udelay(50);
4188
4189 tmp &= ~grbm_soft_reset;
4190 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4191 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4192
4193 /* Wait a little for things to settle down */
4194 udelay(50);
4195 }
4196 return 0;
4197 }
4198
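/*
 * Read the GPU clock counter through a KIQ COPY_DATA packet for cases
 * where direct register access is not possible (e.g. Vega10 under SRIOV
 * runtime). Gives up after MAX_KIQ_REG_TRY polling attempts.
 */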
4199 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4200 {
4201 signed long r, cnt = 0;
4202 unsigned long flags;
4203 uint32_t seq, reg_val_offs = 0;
4204 uint64_t value = 0;
4205 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
4206 struct amdgpu_ring *ring = &kiq->ring;
4207
4208 BUG_ON(!ring->funcs->emit_rreg);
4209
4210 spin_lock_irqsave(&kiq->ring_lock, flags);
4211 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4212 pr_err("critical bug! too many kiq readers\n");
4213 goto failed_unlock;
4214 }
4215 amdgpu_ring_alloc(ring, 32);
4216 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4217 	amdgpu_ring_write(ring, 9 | /* src: register */
4218 (5 << 8) | /* dst: memory */
4219 (1 << 16) | /* count sel */
4220 (1 << 20)); /* write confirm */
4221 amdgpu_ring_write(ring, 0);
4222 amdgpu_ring_write(ring, 0);
4223 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4224 reg_val_offs * 4));
4225 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4226 reg_val_offs * 4));
4227 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4228 if (r)
4229 goto failed_undo;
4230
4231 amdgpu_ring_commit(ring);
4232 spin_unlock_irqrestore(&kiq->ring_lock, flags);
4233
4234 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4235
4236 	/* Don't keep waiting in the gpu reset case, as that may block the
4237 	 * gpu_recover() routine forever: e.g. this virt_kiq_rreg can be
4238 	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
4239 	 * never return if we kept waiting here, which would hang
4240 	 * gpu_recover().
4241 	 *
4242 	 * also don't keep waiting in IRQ context
4243 	 */
4244 if (r < 1 && (amdgpu_in_reset(adev)))
4245 goto failed_kiq_read;
4246
4247 might_sleep();
4248 while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4249 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4250 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4251 }
4252
4253 if (cnt > MAX_KIQ_REG_TRY)
4254 goto failed_kiq_read;
4255
4256 mb();
4257 value = (uint64_t)adev->wb.wb[reg_val_offs] |
4258 		(uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4259 amdgpu_device_wb_free(adev, reg_val_offs);
4260 return value;
4261
4262 failed_undo:
4263 amdgpu_ring_undo(ring);
4264 failed_unlock:
4265 spin_unlock_irqrestore(&kiq->ring_lock, flags);
4266 failed_kiq_read:
4267 if (reg_val_offs)
4268 amdgpu_device_wb_free(adev, reg_val_offs);
4269 pr_err("failed to read gpu clock\n");
4270 return ~0;
4271 }
4272
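/*
 * On GC 9.3.0 (Renoir) read the SMUIO TSC directly, re-reading the upper
 * half to catch a carry between the two 32-bit reads. Everywhere else,
 * latch the RLC clock counter with GFXOFF disabled, going through the KIQ
 * where required.
 */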
4273 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4274 {
4275 uint64_t clock, clock_lo, clock_hi, hi_check;
4276
4277 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4278 case IP_VERSION(9, 3, 0):
4279 preempt_disable();
4280 clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4281 clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4282 hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4283 	/* The SMUIO TSC clock frequency is 100MHz, so the 32-bit low word
4284 	 * carries over roughly every 42 seconds.
4285 */
4286 if (hi_check != clock_hi) {
4287 clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4288 clock_hi = hi_check;
4289 }
4290 preempt_enable();
4291 clock = clock_lo | (clock_hi << 32ULL);
4292 break;
4293 default:
4294 amdgpu_gfx_off_ctrl(adev, false);
4295 mutex_lock(&adev->gfx.gpu_clock_mutex);
4296 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
4297 IP_VERSION(9, 0, 1) &&
4298 amdgpu_sriov_runtime(adev)) {
4299 clock = gfx_v9_0_kiq_read_clock(adev);
4300 } else {
4301 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4302 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4303 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4304 }
4305 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4306 amdgpu_gfx_off_ctrl(adev, true);
4307 break;
4308 }
4309 return clock;
4310 }
4311
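/*
 * Emit WRITE_DATA packets that point the per-VMID GDS, GWS and OA
 * allocations at the ranges reserved for @vmid; the OA mask is a
 * contiguous run of oa_size bits starting at oa_base.
 */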
4312 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4313 uint32_t vmid,
4314 uint32_t gds_base, uint32_t gds_size,
4315 uint32_t gws_base, uint32_t gws_size,
4316 uint32_t oa_base, uint32_t oa_size)
4317 {
4318 struct amdgpu_device *adev = ring->adev;
4319
4320 /* GDS Base */
4321 gfx_v9_0_write_data_to_reg(ring, 0, false,
4322 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4323 gds_base);
4324
4325 /* GDS Size */
4326 gfx_v9_0_write_data_to_reg(ring, 0, false,
4327 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4328 gds_size);
4329
4330 /* GWS */
4331 gfx_v9_0_write_data_to_reg(ring, 0, false,
4332 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4333 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4334
4335 /* OA: program a contiguous mask of oa_size bits starting at oa_base */
4336 gfx_v9_0_write_data_to_reg(ring, 0, false,
4337 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4338 (1 << (oa_size + oa_base)) - (1 << oa_base));
4339 }
4340
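/*
 * Hand-assembled GFX9 compute shaders, stored as raw dwords. They are
 * dispatched by gfx_v9_0_do_edc_gpr_workarounds() below to write every
 * VGPR and SGPR once, so the ECC state of the GPR memories starts out
 * initialized when RAS is enabled.
 */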
4341 static const u32 vgpr_init_compute_shader[] =
4342 {
4343 0xb07c0000, 0xbe8000ff,
4344 0x000000f8, 0xbf110800,
4345 0x7e000280, 0x7e020280,
4346 0x7e040280, 0x7e060280,
4347 0x7e080280, 0x7e0a0280,
4348 0x7e0c0280, 0x7e0e0280,
4349 0x80808800, 0xbe803200,
4350 0xbf84fff5, 0xbf9c0000,
4351 0xd28c0001, 0x0001007f,
4352 0xd28d0001, 0x0002027e,
4353 0x10020288, 0xb8810904,
4354 0xb7814000, 0xd1196a01,
4355 0x00000301, 0xbe800087,
4356 0xbefc00c1, 0xd89c4000,
4357 0x00020201, 0xd89cc080,
4358 0x00040401, 0x320202ff,
4359 0x00000800, 0x80808100,
4360 0xbf84fff8, 0x7e020280,
4361 0xbf810000, 0x00000000,
4362 };
4363
4364 static const u32 sgpr_init_compute_shader[] =
4365 {
4366 0xb07c0000, 0xbe8000ff,
4367 0x0000005f, 0xbee50080,
4368 0xbe812c65, 0xbe822c65,
4369 0xbe832c65, 0xbe842c65,
4370 0xbe852c65, 0xb77c0005,
4371 0x80808500, 0xbf84fff8,
4372 0xbe800080, 0xbf810000,
4373 };
4374
4375 static const u32 vgpr_init_compute_shader_arcturus[] = {
4376 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4377 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4378 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4379 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4380 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4381 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4382 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4383 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4384 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4385 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4386 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4387 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4388 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4389 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4390 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4391 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4392 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4393 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4394 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4395 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4396 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4397 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4398 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4399 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4400 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4401 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4402 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4403 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4404 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4405 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4406 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4407 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4408 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4409 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4410 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4411 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4412 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4413 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4414 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4415 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4416 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4417 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4418 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4419 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4420 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4421 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4422 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4423 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4424 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4425 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4426 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4427 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4428 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4429 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4430 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4431 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4432 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4433 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4434 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4435 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4436 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4437 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4438 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4439 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4440 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4441 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4442 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4443 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4444 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4445 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4446 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4447 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4448 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4449 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4450 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4451 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4452 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4453 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4454 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4455 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4456 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4457 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4458 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4459 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4460 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4461 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4462 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4463 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4464 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4465 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4466 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4467 0xbf84fff8, 0xbf810000,
4468 };
4469
4470 /* When the register arrays below are changed, please update gpr_reg_size
4471 and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
4472 to cover all gfx9 ASICs */
4473 static const struct soc15_reg_entry vgpr_init_regs[] = {
4474 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4475 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4476 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4477 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4478 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4479 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
4480 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4481 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4482 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4483 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4484 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4485 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4486 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4487 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4488 };
4489
4490 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4491 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4492 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4493 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4494 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4495 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4496 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
4497 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4498 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4499 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4500 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4501 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4502 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4503 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4504 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4505 };
4506
4507 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4508 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4509 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4510 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4511 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4512 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4513 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4514 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4515 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4516 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4517 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4518 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4519 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4520 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4521 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4522 };
4523
4524 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4525 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4526 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4527 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4528 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4529 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4530 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4531 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4532 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4533 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4534 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4535 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4536 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4537 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4538 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4539 };
4540
4541 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4542 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4543 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4544 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4545 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4546 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4547 { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4548 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4549 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4550 { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4551 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4552 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4553 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4554 { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4555 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4556 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4557 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4558 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4559 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4560 { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4561 { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4562 { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4563 { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4564 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4565 { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4566 { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4567 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4568 { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4569 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4570 { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4571 { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4572 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4573 { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4574 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4575 };
4576
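/*
 * Initialize the ECC state of the whole GDS by clearing it with a CPDMA
 * DMA_DATA packet (destination GDS, zero data supplied by the packet),
 * then poll until the CP has consumed the packet (rptr catches up with
 * wptr) before shrinking GDS_VMID0_SIZE back to zero.
 */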
4577 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4578 {
4579 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4580 int i, r;
4581
4582 /* only supported when RAS is enabled */
4583 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4584 return 0;
4585
4586 r = amdgpu_ring_alloc(ring, 7);
4587 if (r) {
4588 drm_err(adev_to_drm(adev), "GDS workarounds failed to lock ring %s (%d).\n",
4589 ring->name, r);
4590 return r;
4591 }
4592
4593 WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4594 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4595
4596 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4597 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4598 PACKET3_DMA_DATA_DST_SEL(1) |
4599 PACKET3_DMA_DATA_SRC_SEL(2) |
4600 PACKET3_DMA_DATA_ENGINE(0)));
4601 amdgpu_ring_write(ring, 0);
4602 amdgpu_ring_write(ring, 0);
4603 amdgpu_ring_write(ring, 0);
4604 amdgpu_ring_write(ring, 0);
4605 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4606 adev->gds.gds_size);
4607
4608 amdgpu_ring_commit(ring);
4609
4610 for (i = 0; i < adev->usec_timeout; i++) {
4611 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4612 break;
4613 udelay(1);
4614 }
4615
4616 if (i >= adev->usec_timeout)
4617 r = -ETIMEDOUT;
4618
4619 WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4620
4621 return r;
4622 }
4623
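/*
 * Build a single hand-written IB that runs the GPR init shaders: one
 * VGPR dispatch followed by two SGPR dispatches, each trailed by a CS
 * partial flush so they retire in order. The dispatch dimensions are
 * derived from the CU count so every SIMD executes the init waves.
 */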
4624 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4625 {
4626 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4627 struct amdgpu_ib ib;
4628 struct dma_fence *f = NULL;
4629 int r, i;
4630 unsigned total_size, vgpr_offset, sgpr_offset;
4631 u64 gpu_addr;
4632
4633 int compute_dim_x = adev->gfx.config.max_shader_engines *
4634 adev->gfx.config.max_cu_per_sh *
4635 adev->gfx.config.max_sh_per_se;
4636 int sgpr_work_group_size = 5;
4637 int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4638 int vgpr_init_shader_size;
4639 const u32 *vgpr_init_shader_ptr;
4640 const struct soc15_reg_entry *vgpr_init_regs_ptr;
4641
4642 /* only supported when RAS is enabled */
4643 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4644 return 0;
4645
4646 /* bail if the compute ring is not ready */
4647 if (!ring->sched.ready)
4648 return 0;
4649
4650 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
4651 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4652 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4653 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4654 } else {
4655 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4656 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4657 vgpr_init_regs_ptr = vgpr_init_regs;
4658 }
4659
4660 total_size =
4661 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4662 total_size +=
4663 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4664 total_size +=
4665 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4666 total_size = ALIGN(total_size, 256);
4667 vgpr_offset = total_size;
4668 total_size += ALIGN(vgpr_init_shader_size, 256);
4669 sgpr_offset = total_size;
4670 total_size += sizeof(sgpr_init_compute_shader);
4671
4672 /* allocate an indirect buffer to put the commands in */
4673 memset(&ib, 0, sizeof(ib));
4674 r = amdgpu_ib_get(adev, NULL, total_size,
4675 AMDGPU_IB_POOL_DIRECT, &ib);
4676 if (r) {
4677 drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r);
4678 return r;
4679 }
4680
4681 /* load the compute shaders */
4682 for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4683 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4684
4685 for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4686 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4687
4688 /* init the ib length to 0 */
4689 ib.length_dw = 0;
4690
4691 /* VGPR */
4692 /* write the register state for the compute dispatch */
4693 for (i = 0; i < gpr_reg_size; i++) {
4694 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4695 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4696 - PACKET3_SET_SH_REG_START;
4697 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4698 }
4699 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4700 gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4701 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4702 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4703 - PACKET3_SET_SH_REG_START;
4704 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4705 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4706
4707 /* write dispatch packet */
4708 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4709 ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4710 ib.ptr[ib.length_dw++] = 1; /* y */
4711 ib.ptr[ib.length_dw++] = 1; /* z */
4712 ib.ptr[ib.length_dw++] =
4713 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4714
4715 /* write CS partial flush packet */
4716 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4717 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4718
4719 /* SGPR1 */
4720 /* write the register state for the compute dispatch */
4721 for (i = 0; i < gpr_reg_size; i++) {
4722 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4723 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4724 - PACKET3_SET_SH_REG_START;
4725 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4726 }
4727 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4728 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4729 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4730 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4731 - PACKET3_SET_SH_REG_START;
4732 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4733 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4734
4735 /* write dispatch packet */
4736 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4737 ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4738 ib.ptr[ib.length_dw++] = 1; /* y */
4739 ib.ptr[ib.length_dw++] = 1; /* z */
4740 ib.ptr[ib.length_dw++] =
4741 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4742
4743 /* write CS partial flush packet */
4744 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4745 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4746
4747 /* SGPR2 */
4748 /* write the register state for the compute dispatch */
4749 for (i = 0; i < gpr_reg_size; i++) {
4750 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4751 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4752 - PACKET3_SET_SH_REG_START;
4753 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4754 }
4755 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4756 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4757 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4758 ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4759 - PACKET3_SET_SH_REG_START;
4760 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4761 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4762
4763 /* write dispatch packet */
4764 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4765 ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4766 ib.ptr[ib.length_dw++] = 1; /* y */
4767 ib.ptr[ib.length_dw++] = 1; /* z */
4768 ib.ptr[ib.length_dw++] =
4769 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4770
4771 /* write CS partial flush packet */
4772 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4773 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4774
4775 /* schedule the ib on the ring */
4776 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4777 if (r) {
4778 drm_err(adev_to_drm(adev), "ib schedule failed (%d).\n", r);
4779 goto fail;
4780 }
4781
4782 /* wait for the GPU to finish processing the IB */
4783 r = dma_fence_wait(f, false);
4784 if (r) {
4785 drm_err(adev_to_drm(adev), "fence wait failed (%d).\n", r);
4786 goto fail;
4787 }
4788
4789 fail:
4790 amdgpu_ib_free(&ib, NULL);
4791 dma_fence_put(f);
4792
4793 return r;
4794 }
4795
4796 static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
4797 {
4798 struct amdgpu_device *adev = ip_block->adev;
4799
4800 adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
4801
4802 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
4803 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4804 adev->gfx.num_gfx_rings = 0;
4805 else
4806 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4807 adev->gfx.xcc_mask = 1;
4808 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4809 AMDGPU_MAX_COMPUTE_RINGS);
4810 gfx_v9_0_set_kiq_pm4_funcs(adev);
4811 gfx_v9_0_set_ring_funcs(adev);
4812 gfx_v9_0_set_irq_funcs(adev);
4813 gfx_v9_0_set_gds_init(adev);
4814 gfx_v9_0_set_rlc_funcs(adev);
4815
4816 /* init rlcg reg access ctrl */
4817 gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
4818
4819 return gfx_v9_0_init_microcode(adev);
4820 }
4821
4822 static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
4823 {
4824 struct amdgpu_device *adev = ip_block->adev;
4825 int r;
4826
4827 /*
4828 * Temporary workaround for an issue where CP firmware fails to
4829 * update the read pointer when CPDMA writes a clearing operation
4830 * to GDS in the suspend/resume sequence on several cards. So just
4831 * limit this operation to the cold boot sequence.
4832 */
4833 if ((!adev->in_suspend) &&
4834 (adev->gds.gds_size)) {
4835 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4836 if (r)
4837 return r;
4838 }
4839
4840 /* requires IBs so do in late init after IB pool is initialized */
4841 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4842 r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4843 else
4844 r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4845
4846 if (r)
4847 return r;
4848
4849 if (adev->gfx.ras &&
4850 adev->gfx.ras->enable_watchdog_timer)
4851 adev->gfx.ras->enable_watchdog_timer(adev);
4852
4853 return 0;
4854 }
4855
4856 static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
4857 {
4858 struct amdgpu_device *adev = ip_block->adev;
4859 int r;
4860
4861 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4862 if (r)
4863 return r;
4864
4865 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4866 if (r)
4867 return r;
4868
4869 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
4870 if (r)
4871 return r;
4872
4873 r = gfx_v9_0_ecc_late_init(ip_block);
4874 if (r)
4875 return r;
4876
4877 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
4878 gfx_v9_4_2_debug_trap_config_init(adev,
4879 adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4880 else
4881 gfx_v9_0_debug_trap_config_init(adev,
4882 adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
4883
4884 return 0;
4885 }
4886
4887 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4888 {
4889 uint32_t rlc_setting;
4890
4891 /* if RLC is not enabled, do nothing */
4892 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4893 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4894 return false;
4895
4896 return true;
4897 }
4898
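/*
 * Request RLC safe mode: write the CMD bit together with MESSAGE=1 and
 * poll until the RLC acknowledges by clearing the CMD field.
 */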
4899 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4900 {
4901 uint32_t data;
4902 unsigned i;
4903
4904 data = RLC_SAFE_MODE__CMD_MASK;
4905 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4906 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4907
4908 /* wait for RLC_SAFE_MODE */
4909 for (i = 0; i < adev->usec_timeout; i++) {
4910 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4911 break;
4912 udelay(1);
4913 }
4914 }
4915
4916 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4917 {
4918 uint32_t data;
4919
4920 data = RLC_SAFE_MODE__CMD_MASK;
4921 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4922 }
4923
4924 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4925 bool enable)
4926 {
4927 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4928
4929 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4930 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4931 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4932 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4933 } else {
4934 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4935 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4936 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4937 }
4938
4939 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4940 }
4941
4942 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4943 bool enable)
4944 {
4945 /* TODO: double check if we need to perform under safe mode */
4946 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4947
4948 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4949 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4950 else
4951 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4952
4953 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4954 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4955 else
4956 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4957
4958 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4959 }
4960
4961 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4962 bool enable)
4963 {
4964 uint32_t data, def;
4965
4966 /* It is disabled by HW by default */
4967 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4968 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4969 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4970
4971 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
4972 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4973
4974 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4975 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4976 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4977
4978 /* only for Vega10 & Raven1 */
4979 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4980
4981 if (def != data)
4982 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4983
4984 /* MGLS is a global flag to control all MGLS in GFX */
4985 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4986 /* 2 - RLC memory Light sleep */
4987 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4988 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4989 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4990 if (def != data)
4991 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4992 }
4993 /* 3 - CP memory Light sleep */
4994 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4995 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4996 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4997 if (def != data)
4998 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4999 }
5000 }
5001 } else {
5002 /* 1 - MGCG_OVERRIDE */
5003 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5004
5005 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
5006 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
5007
5008 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
5009 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
5010 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
5011 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
5012
5013 if (def != data)
5014 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5015
5016 /* 2 - disable MGLS in RLC */
5017 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
5018 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
5019 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
5020 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
5021 }
5022
5023 /* 3 - disable MGLS in CP */
5024 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
5025 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
5026 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
5027 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
5028 }
5029 }
5030 }
5031
5032 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
5033 bool enable)
5034 {
5035 uint32_t data, def;
5036
5037 if (!adev->gfx.num_gfx_rings)
5038 return;
5039
5040 /* Enable 3D CGCG/CGLS */
5041 if (enable) {
5042 /* write cmd to clear cgcg/cgls ov */
5043 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5044 /* unset CGCG override */
5045 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
5046 /* update CGCG and CGLS override bits */
5047 if (def != data)
5048 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5049
5050 /* enable 3Dcgcg FSM(0x0000363f) */
5051 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5052
5053 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5054 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5055 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5056 else
5057 data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
5058
5059 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5060 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5061 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
5062 if (def != data)
5063 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5064
5065 /* set IDLE_POLL_COUNT(0x00900100) */
5066 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5067 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5068 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5069 if (def != data)
5070 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5071 } else {
5072 /* Disable CGCG/CGLS */
5073 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
5074 /* disable cgcg, cgls should be disabled */
5075 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
5076 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
5077 /* disable cgcg and cgls in FSM */
5078 if (def != data)
5079 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
5080 }
5081 }
5082
5083 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
5084 bool enable)
5085 {
5086 uint32_t def, data;
5087
5088 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5089 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5090 /* unset CGCG override */
5091 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5092 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5093 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5094 else
5095 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5096 /* update CGCG and CGLS override bits */
5097 if (def != data)
5098 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5099
5100 /* enable cgcg FSM(0x0000363F) */
5101 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5102
5103 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
5104 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5105 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5106 else
5107 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5108 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5109 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5110 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5111 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5112 if (def != data)
5113 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5114
5115 /* set IDLE_POLL_COUNT(0x00900100) */
5116 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5117 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5118 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5119 if (def != data)
5120 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5121 } else {
5122 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5123 /* reset CGCG/CGLS bits */
5124 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5125 /* disable cgcg and cgls in FSM */
5126 if (def != data)
5127 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5128 }
5129 }
5130
5131 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5132 bool enable)
5133 {
5134 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5135 if (enable) {
5136 /* CGCG/CGLS should be enabled after MGCG/MGLS
5137 * === MGCG + MGLS ===
5138 */
5139 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5140 /* === CGCG /CGLS for GFX 3D Only === */
5141 gfx_v9_0_update_3d_clock_gating(adev, enable);
5142 /* === CGCG + CGLS === */
5143 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5144 } else {
5145 /* CGCG/CGLS should be disabled before MGCG/MGLS
5146 * === CGCG + CGLS ===
5147 */
5148 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5149 /* === CGCG /CGLS for GFX 3D Only === */
5150 gfx_v9_0_update_3d_clock_gating(adev, enable);
5151 /* === MGCG + MGLS === */
5152 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5153 }
5154 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5155 return 0;
5156 }
5157
5158 static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
5159 unsigned int vmid)
5160 {
5161 u32 reg, data;
5162
5163 reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5164 if (amdgpu_sriov_is_pp_one_vf(adev))
5165 data = RREG32_NO_KIQ(reg);
5166 else
5167 data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
5168
5169 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5170 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5171
5172 if (amdgpu_sriov_is_pp_one_vf(adev))
5173 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5174 else
5175 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5176 }
5177
5178 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
5179 struct amdgpu_ring *ring, unsigned int vmid)
5180 {
5181 amdgpu_gfx_off_ctrl(adev, false);
5182
5183 gfx_v9_0_update_spm_vmid_internal(adev, vmid);
5184
5185 amdgpu_gfx_off_ctrl(adev, true);
5186 }
5187
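/*
 * Linear scan of an RLCG allowlist: each entry's hwip/instance/segment
 * base is resolved through adev->reg_offset and compared against the
 * absolute register offset being accessed.
 */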
5188 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5189 uint32_t offset,
5190 struct soc15_reg_rlcg *entries, int arr_size)
5191 {
5192 int i;
5193 uint32_t reg;
5194
5195 if (!entries)
5196 return false;
5197
5198 for (i = 0; i < arr_size; i++) {
5199 const struct soc15_reg_rlcg *entry;
5200
5201 entry = &entries[i];
5202 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5203 if (offset == reg)
5204 return true;
5205 }
5206
5207 return false;
5208 }
5209
5210 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5211 {
5212 return gfx_v9_0_check_rlcg_range(adev, offset,
5213 (void *)rlcg_access_gc_9_0,
5214 ARRAY_SIZE(rlcg_access_gc_9_0));
5215 }
5216
5217 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5218 .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5219 .set_safe_mode = gfx_v9_0_set_safe_mode,
5220 .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5221 .init = gfx_v9_0_rlc_init,
5222 .get_csb_size = gfx_v9_0_get_csb_size,
5223 .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5224 .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5225 .resume = gfx_v9_0_rlc_resume,
5226 .stop = gfx_v9_0_rlc_stop,
5227 .reset = gfx_v9_0_rlc_reset,
5228 .start = gfx_v9_0_rlc_start,
5229 .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5230 .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5231 };
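/*
 * A minimal sketch of how the generic helpers are expected to dispatch
 * through this table (assuming the amdgpu_gfx_rlc_enter_safe_mode()
 * wrapper used elsewhere in this file checks RLC state first):
 *
 *	if (adev->gfx.rlc.funcs->is_rlc_enabled(adev))
 *		adev->gfx.rlc.funcs->set_safe_mode(adev, 0);
 */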
5232
5233 static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
5234 enum amd_powergating_state state)
5235 {
5236 struct amdgpu_device *adev = ip_block->adev;
5237 bool enable = (state == AMD_PG_STATE_GATE);
5238
5239 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5240 case IP_VERSION(9, 2, 2):
5241 case IP_VERSION(9, 1, 0):
5242 case IP_VERSION(9, 3, 0):
5243 if (!enable)
5244 amdgpu_gfx_off_ctrl_immediate(adev, false);
5245
5246 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5247 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5248 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5249 } else {
5250 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5251 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5252 }
5253
5254 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5255 gfx_v9_0_enable_cp_power_gating(adev, true);
5256 else
5257 gfx_v9_0_enable_cp_power_gating(adev, false);
5258
5259 /* update gfx cgpg state */
5260 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5261
5262 /* update mgcg state */
5263 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5264
5265 if (enable)
5266 amdgpu_gfx_off_ctrl_immediate(adev, true);
5267 break;
5268 case IP_VERSION(9, 2, 1):
5269 amdgpu_gfx_off_ctrl_immediate(adev, enable);
5270 break;
5271 default:
5272 break;
5273 }
5274
5275 return 0;
5276 }
5277
5278 static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5279 enum amd_clockgating_state state)
5280 {
5281 struct amdgpu_device *adev = ip_block->adev;
5282
5283 if (amdgpu_sriov_vf(adev))
5284 return 0;
5285
5286 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
5287 case IP_VERSION(9, 0, 1):
5288 case IP_VERSION(9, 2, 1):
5289 case IP_VERSION(9, 4, 0):
5290 case IP_VERSION(9, 2, 2):
5291 case IP_VERSION(9, 1, 0):
5292 case IP_VERSION(9, 4, 1):
5293 case IP_VERSION(9, 3, 0):
5294 case IP_VERSION(9, 4, 2):
5295 gfx_v9_0_update_gfx_clock_gating(adev,
5296 state == AMD_CG_STATE_GATE);
5297 break;
5298 default:
5299 break;
5300 }
5301 return 0;
5302 }
5303
5304 static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
5305 {
5306 struct amdgpu_device *adev = ip_block->adev;
5307 int data;
5308
5309 if (amdgpu_sriov_vf(adev))
5310 *flags = 0;
5311
5312 /* AMD_CG_SUPPORT_GFX_MGCG */
5313 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5314 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5315 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5316
5317 /* AMD_CG_SUPPORT_GFX_CGCG */
5318 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5319 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5320 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5321
5322 /* AMD_CG_SUPPORT_GFX_CGLS */
5323 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5324 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5325
5326 /* AMD_CG_SUPPORT_GFX_RLC_LS */
5327 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5328 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5329 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5330
5331 /* AMD_CG_SUPPORT_GFX_CP_LS */
5332 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5333 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5334 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5335
5336 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {
5337 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5338 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5339 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5340 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5341
5342 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5343 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5344 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5345 }
5346 }
5347
5348 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5349 {
5350 return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr */
5351 }
5352
5353 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5354 {
5355 struct amdgpu_device *adev = ring->adev;
5356 u64 wptr;
5357
5358 /* XXX check if swapping is necessary on BE */
5359 if (ring->use_doorbell) {
5360 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5361 } else {
5362 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5363 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5364 }
5365
5366 return wptr;
5367 }
5368
5369 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5370 {
5371 struct amdgpu_device *adev = ring->adev;
5372
5373 if (ring->use_doorbell) {
5374 /* XXX check if swapping is necessary on BE */
5375 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5376 WDOORBELL64(ring->doorbell_index, ring->wptr);
5377 } else {
5378 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5379 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5380 }
5381 }
5382
5383 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5384 {
5385 struct amdgpu_device *adev = ring->adev;
5386 u32 ref_and_mask, reg_mem_engine;
5387
5388 if (!adev->gfx.funcs->get_hdp_flush_mask) {
5389 dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
5390 return;
5391 }
5392
5393 adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
5394 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5395 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5396 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5397 ref_and_mask, ref_and_mask, 0x20);
5398 }
5399
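/*
 * Emit a gfx IB: an INDIRECT_BUFFER (or INDIRECT_BUFFER_CONST for CE
 * IBs) packet carrying the dword-aligned GPU address, followed by a
 * control word holding the IB size and VMID, with preemption hints and
 * DE metadata folded in for preemptible IBs.
 */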
5400 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5401 struct amdgpu_job *job,
5402 struct amdgpu_ib *ib,
5403 uint32_t flags)
5404 {
5405 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5406 u32 header, control = 0;
5407
5408 if (ib->flags & AMDGPU_IB_FLAG_CE)
5409 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5410 else
5411 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5412
5413 control |= ib->length_dw | (vmid << 24);
5414
5415 if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
5416 control |= INDIRECT_BUFFER_PRE_ENB(1);
5417
5418 if (flags & AMDGPU_IB_PREEMPTED)
5419 control |= INDIRECT_BUFFER_PRE_RESUME(1);
5420
5421 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5422 gfx_v9_0_ring_emit_de_meta(ring,
5423 (!amdgpu_sriov_vf(ring->adev) &&
5424 flags & AMDGPU_IB_PREEMPTED) ?
5425 true : false,
5426 job->gds_size > 0 && job->gds_base != 0);
5427 }
5428
5429 amdgpu_ring_write(ring, header);
5430 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5431 amdgpu_ring_write(ring,
5432 #ifdef __BIG_ENDIAN
5433 (2 << 0) |
5434 #endif
5435 lower_32_bits(ib->gpu_addr));
5436 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5437 amdgpu_ring_ib_on_emit_cntl(ring);
5438 amdgpu_ring_write(ring, control);
5439 }
5440
5441 static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
5442 unsigned offset)
5443 {
5444 u32 control = ring->ring[offset];
5445
5446 control |= INDIRECT_BUFFER_PRE_RESUME(1);
5447 ring->ring[offset] = control;
5448 }
5449
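/*
 * Patch a previously emitted CE metadata write with the payload saved
 * in the CSA, splitting the copy in two when the destination range
 * wraps past the end of the ring buffer.
 */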
5450 static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
5451 unsigned offset)
5452 {
5453 struct amdgpu_device *adev = ring->adev;
5454 void *ce_payload_cpu_addr;
5455 uint64_t payload_offset, payload_size;
5456
5457 payload_size = sizeof(struct v9_ce_ib_state);
5458
5459 payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5460 ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5461
5462 if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5463 memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
5464 } else {
5465 memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
5466 (ring->buf_mask + 1 - offset) << 2);
5467 payload_size -= (ring->buf_mask + 1 - offset) << 2;
5468 memcpy((void *)&ring->ring[0],
5469 ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5470 payload_size);
5471 }
5472 }
5473
5474 static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
5475 unsigned offset)
5476 {
5477 struct amdgpu_device *adev = ring->adev;
5478 void *de_payload_cpu_addr;
5479 uint64_t payload_offset, payload_size;
5480
5481 payload_size = sizeof(struct v9_de_ib_state);
5482
5483 payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
5484 de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
5485
5486 ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
5487 IB_COMPLETION_STATUS_PREEMPTED;
5488
5489 if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
5490 memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
5491 } else {
5492 memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
5493 (ring->buf_mask + 1 - offset) << 2);
5494 payload_size -= (ring->buf_mask + 1 - offset) << 2;
5495 memcpy((void *)&ring->ring[0],
5496 de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
5497 payload_size);
5498 }
5499 }
5500
5501 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5502 struct amdgpu_job *job,
5503 struct amdgpu_ib *ib,
5504 uint32_t flags)
5505 {
5506 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5507 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5508
5509 /* Currently there is a high likelihood of a wave ID mismatch
5510 * between ME and GDS, leading to a HW deadlock, because ME generates
5511 * different wave IDs than the GDS expects. This situation happens
5512 * randomly when at least 5 compute pipes use GDS ordered append.
5513 * The wave IDs generated by ME are also wrong after suspend/resume.
5514 * Those are probably bugs somewhere else in the kernel driver.
5515 *
5516 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5517 * GDS to 0 for this ring (me/pipe).
5518 */
5519 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5520 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5521 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5522 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5523 }
5524
5525 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5526 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5527 amdgpu_ring_write(ring,
5528 #ifdef __BIG_ENDIAN
5529 (2 << 0) |
5530 #endif
5531 lower_32_bits(ib->gpu_addr));
5532 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5533 amdgpu_ring_write(ring, control);
5534 }
5535
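/*
 * Emit an end-of-pipe fence via RELEASE_MEM: flush and write back the
 * TC caches (NC action only for TC_WB_ONLY fences), then write the 32-
 * or 64-bit sequence number to @addr and optionally raise an interrupt.
 */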
5536 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5537 u64 seq, unsigned flags)
5538 {
5539 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5540 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5541 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5542 bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
5543 uint32_t dw2 = 0;
5544
5545 /* RELEASE_MEM - flush caches, send int */
5546 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5547
5548 if (writeback) {
5549 dw2 = EOP_TC_NC_ACTION_EN;
5550 } else {
5551 dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
5552 EOP_TC_MD_ACTION_EN;
5553 }
5554 dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5555 EVENT_INDEX(5);
5556 if (exec)
5557 dw2 |= EOP_EXEC;
5558
5559 amdgpu_ring_write(ring, dw2);
5560 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5561
5562 /*
5563 * the address must be Qword aligned for a 64-bit write, and Dword
5564 * aligned when only the low 32 bits are written (data high discarded)
5565 */
5566 if (write64bit)
5567 BUG_ON(addr & 0x7);
5568 else
5569 BUG_ON(addr & 0x3);
5570 amdgpu_ring_write(ring, lower_32_bits(addr));
5571 amdgpu_ring_write(ring, upper_32_bits(addr));
5572 amdgpu_ring_write(ring, lower_32_bits(seq));
5573 amdgpu_ring_write(ring, upper_32_bits(seq));
5574 amdgpu_ring_write(ring, 0);
5575 }
5576
5577 static void gfx_v9_0_ring_emit_event_write(struct amdgpu_ring *ring,
5578 uint32_t event_type,
5579 uint32_t event_index)
5580 {
5581 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
5582 amdgpu_ring_write(ring, EVENT_TYPE(event_type) |
5583 EVENT_INDEX(event_index));
5584 }
5585
5586 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
5587 {
5588 const unsigned int cp_coher_cntl =
5589 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
5590 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
5591 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
5592 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
5593 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
5594
5595 /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
5596 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
5597 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
5598 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
5599 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
5600 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
5601 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
5602 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
5603 }
5604
5605 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5606 {
5607 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5608 gfx_v9_0_ring_emit_event_write(ring, VS_PARTIAL_FLUSH, 4);
5609 gfx_v9_0_ring_emit_event_write(ring, PS_PARTIAL_FLUSH, 4);
5610 }
5611 gfx_v9_0_ring_emit_event_write(ring, CS_PARTIAL_FLUSH, 4);
5612 gfx_v9_0_emit_mem_sync(ring);
5613 }
5614
5615 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5616 unsigned vmid, uint64_t pd_addr)
5617 {
5618 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5619
5620 /* compute doesn't have PFP */
5621 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5622 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5623 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5624 amdgpu_ring_write(ring, 0x0);
5625 }
5626 }
5627
5628 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5629 {
5630 return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
5631 }
5632
5633 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5634 {
5635 u64 wptr;
5636
5637 /* XXX check if swapping is necessary on BE */
5638 if (ring->use_doorbell)
5639 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5640 else
5641 BUG();
5642 return wptr;
5643 }
5644
5645 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5646 {
5647 struct amdgpu_device *adev = ring->adev;
5648
5649 /* XXX check if swapping is necessary on BE */
5650 if (ring->use_doorbell) {
5651 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5652 WDOORBELL64(ring->doorbell_index, ring->wptr);
5653 } else {
5654 BUG(); /* only DOORBELL method supported on gfx9 now */
5655 }
5656 }
5657
5658 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5659 u64 seq, unsigned int flags)
5660 {
5661 struct amdgpu_device *adev = ring->adev;
5662
5663 /* write fence seq to the "addr" */
5664 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5665 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5666 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5667 amdgpu_ring_write(ring, lower_32_bits(addr));
5668 amdgpu_ring_write(ring, upper_32_bits(addr));
5669 amdgpu_ring_write(ring, lower_32_bits(seq));
5670
5671 if (flags & AMDGPU_FENCE_FLAG_INT) {
5672 /* set register to trigger INT */
5673 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5674 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5675 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5676 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5677 amdgpu_ring_write(ring, 0);
5678 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5679 }
5680 }
5681
5682 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5683 {
5684 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5685 amdgpu_ring_write(ring, 0);
5686 }
5687
5688 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
5689 {
5690 struct amdgpu_device *adev = ring->adev;
5691 struct v9_ce_ib_state ce_payload = {0};
5692 uint64_t offset, ce_payload_gpu_addr;
5693 void *ce_payload_cpu_addr;
5694 int cnt;
5695
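	/* PACKET3 count field is (number of DWs following the header) - 1:
	 * 3 setup DWs (control word plus 64-bit destination address) plus
	 * the payload, hence (sizeof(ce_payload) >> 2) + 4 - 2.
	 */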
5696 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5697
5698 offset = offsetof(struct v9_gfx_meta_data, ce_payload);
5699 ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5700 ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5701
5702 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5703 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5704 WRITE_DATA_DST_SEL(8) |
5705 WR_CONFIRM) |
5706 WRITE_DATA_CACHE_POLICY(0));
5707 amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
5708 amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
5709
5710 amdgpu_ring_ib_on_emit_ce(ring);
5711
5712 if (resume)
5713 amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
5714 sizeof(ce_payload) >> 2);
5715 else
5716 amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
5717 sizeof(ce_payload) >> 2);
5718 }
5719
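/*
 * Preempt the ring's current IB: assert the preemption conditional so
 * following COND_EXEC packets are skipped, emit a trailing fence, ask
 * the KIQ to preempt the queue without unmapping it, then poll the
 * trailing fence to confirm the preemption completed.
 */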
5720 static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
5721 {
5722 int i, r = 0;
5723 struct amdgpu_device *adev = ring->adev;
5724 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5725 struct amdgpu_ring *kiq_ring = &kiq->ring;
5726 unsigned long flags;
5727
5728 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
5729 return -EINVAL;
5730
5731 spin_lock_irqsave(&kiq->ring_lock, flags);
5732
5733 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
5734 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5735 return -ENOMEM;
5736 }
5737
5738 /* assert preemption condition */
5739 amdgpu_ring_set_preempt_cond_exec(ring, false);
5740
5741 ring->trail_seq += 1;
5742 amdgpu_ring_alloc(ring, 13);
5743 gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
5744 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
5745
5746 /* assert IB preemption, emit the trailing fence */
5747 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5748 ring->trail_fence_gpu_addr,
5749 ring->trail_seq);
5750
5751 amdgpu_ring_commit(kiq_ring);
5752 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5753
5754 /* poll the trailing fence */
5755 for (i = 0; i < adev->usec_timeout; i++) {
5756 if (ring->trail_seq ==
5757 le32_to_cpu(*ring->trail_fence_cpu_addr))
5758 break;
5759 udelay(1);
5760 }
5761
5762 if (i >= adev->usec_timeout) {
5763 r = -EINVAL;
5764 		drm_warn(adev_to_drm(adev), "ring %d: timed out preempting IB\n", ring->idx);
5765 }
5766
5767 	/* reset CP_VMID_PREEMPT after the trailing fence */
5768 amdgpu_ring_emit_wreg(ring,
5769 SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
5770 0x0);
5771 amdgpu_ring_commit(ring);
5772
5773 /* deassert preemption condition */
5774 amdgpu_ring_set_preempt_cond_exec(ring, true);
5775 return r;
5776 }
5777
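/*
 * Write the DE metadata into the CSA. On resume from preemption the
 * previously saved payload is replayed from the CSA CPU mapping;
 * otherwise a zeroed payload is written, optionally carrying the GDS
 * backup address.
 */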
5778 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
5779 {
5780 struct amdgpu_device *adev = ring->adev;
5781 struct v9_de_ib_state de_payload = {0};
5782 uint64_t offset, gds_addr, de_payload_gpu_addr;
5783 void *de_payload_cpu_addr;
5784 int cnt;
5785
5786 offset = offsetof(struct v9_gfx_meta_data, de_payload);
5787 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5788 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5789
5790 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5791 AMDGPU_CSA_SIZE - adev->gds.gds_size,
5792 PAGE_SIZE);
5793
5794 if (usegds) {
5795 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5796 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5797 }
5798
5799 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5800 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5801 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5802 WRITE_DATA_DST_SEL(8) |
5803 WR_CONFIRM) |
5804 WRITE_DATA_CACHE_POLICY(0));
5805 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5806 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
5807
5808 amdgpu_ring_ib_on_emit_de(ring);
5809 if (resume)
5810 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5811 sizeof(de_payload) >> 2);
5812 else
5813 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5814 sizeof(de_payload) >> 2);
5815 }
5816
5817 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5818 bool secure)
5819 {
5820 uint32_t v = secure ? FRAME_TMZ : 0;
5821
5822 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5823 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5824 }
5825
5826 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5827 {
5828 uint32_t dw2 = 0;
5829
5830 gfx_v9_0_ring_emit_ce_meta(ring,
5831 (!amdgpu_sriov_vf(ring->adev) &&
5832 flags & AMDGPU_IB_PREEMPTED) ? true : false);
5833
5834 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5835 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5836 /* set load_global_config & load_global_uconfig */
5837 dw2 |= 0x8001;
5838 /* set load_cs_sh_regs */
5839 dw2 |= 0x01000000;
5840 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5841 dw2 |= 0x10002;
5842
5843 		/* set load_ce_ram if a preamble is present */
5844 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5845 dw2 |= 0x10000000;
5846 } else {
5847 		/* still load_ce_ram if this is the first time a preamble
5848 		 * is presented, even though no context switch happens.
5849 		 */
5850 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5851 dw2 |= 0x10000000;
5852 }
5853
5854 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5855 amdgpu_ring_write(ring, dw2);
5856 amdgpu_ring_write(ring, 0);
5857 }
5858
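/*
 * Emit a COND_EXEC packet whose DW-count slot is written as a dummy
 * value here and patched later; return the ring offset of that slot.
 */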
5859 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
5860 uint64_t addr)
5861 {
5862 	unsigned ret;

5863 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5864 amdgpu_ring_write(ring, lower_32_bits(addr));
5865 amdgpu_ring_write(ring, upper_32_bits(addr));
5866 /* discard following DWs if *cond_exec_gpu_addr==0 */
5867 amdgpu_ring_write(ring, 0);
5868 ret = ring->wptr & ring->buf_mask;
5869 /* patch dummy value later */
5870 amdgpu_ring_write(ring, 0);
5871 return ret;
5872 }
5873
5874 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5875 uint32_t reg_val_offs)
5876 {
5877 struct amdgpu_device *adev = ring->adev;
5878
5879 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5880 amdgpu_ring_write(ring, 0 | /* src: register*/
5881 (5 << 8) | /* dst: memory */
5882 (1 << 20)); /* write confirm */
5883 amdgpu_ring_write(ring, reg);
5884 amdgpu_ring_write(ring, 0);
5885 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5886 reg_val_offs * 4));
5887 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5888 reg_val_offs * 4));
5889 }
5890
5891 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5892 uint32_t val)
5893 {
5894 uint32_t cmd = 0;
5895
5896 switch (ring->funcs->type) {
5897 case AMDGPU_RING_TYPE_GFX:
5898 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5899 break;
5900 case AMDGPU_RING_TYPE_KIQ:
5901 cmd = (1 << 16); /* no inc addr */
5902 break;
5903 default:
5904 cmd = WR_CONFIRM;
5905 break;
5906 }
5907 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5908 amdgpu_ring_write(ring, cmd);
5909 amdgpu_ring_write(ring, reg);
5910 amdgpu_ring_write(ring, 0);
5911 amdgpu_ring_write(ring, val);
5912 }
5913
5914 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5915 uint32_t val, uint32_t mask)
5916 {
5917 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5918 }
5919
5920 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5921 uint32_t reg0, uint32_t reg1,
5922 uint32_t ref, uint32_t mask)
5923 {
5924 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5925 struct amdgpu_device *adev = ring->adev;
5926 bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5927 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5928
5929 if (fw_version_ok)
5930 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5931 ref, mask, 0x20);
5932 else
5933 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5934 ref, mask);
5935 }
5936
5937 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5938 {
5939 struct amdgpu_device *adev = ring->adev;
5940 uint32_t value = 0;
5941
5942 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5943 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5944 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5945 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5946 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5947 WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5948 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5949 }
5950
5951 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5952 enum amdgpu_interrupt_state state)
5953 {
5954 switch (state) {
5955 case AMDGPU_IRQ_STATE_DISABLE:
5956 case AMDGPU_IRQ_STATE_ENABLE:
5957 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5958 TIME_STAMP_INT_ENABLE,
5959 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5960 break;
5961 default:
5962 break;
5963 }
5964 }
5965
5966 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5967 int me, int pipe,
5968 enum amdgpu_interrupt_state state)
5969 {
5970 u32 mec_int_cntl, mec_int_cntl_reg;
5971
5972 /*
5973 * amdgpu controls only the first MEC. That's why this function only
5974 * handles the setting of interrupts for this specific MEC. All other
5975 * pipes' interrupts are set by amdkfd.
5976 */
5977
5978 if (me == 1) {
5979 switch (pipe) {
5980 case 0:
5981 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5982 break;
5983 case 1:
5984 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5985 break;
5986 case 2:
5987 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5988 break;
5989 case 3:
5990 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5991 break;
5992 default:
5993 DRM_DEBUG("invalid pipe %d\n", pipe);
5994 return;
5995 }
5996 } else {
5997 DRM_DEBUG("invalid me %d\n", me);
5998 return;
5999 }
6000
6001 switch (state) {
6002 case AMDGPU_IRQ_STATE_DISABLE:
6003 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6004 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6005 TIME_STAMP_INT_ENABLE, 0);
6006 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6007 break;
6008 case AMDGPU_IRQ_STATE_ENABLE:
6009 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6010 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6011 TIME_STAMP_INT_ENABLE, 1);
6012 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6013 break;
6014 default:
6015 break;
6016 }
6017 }
6018
6019 static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
6020 int me, int pipe)
6021 {
6022 /*
6023 * amdgpu controls only the first MEC. That's why this function only
6024 * handles the setting of interrupts for this specific MEC. All other
6025 * pipes' interrupts are set by amdkfd.
6026 */
6027 if (me != 1)
6028 return 0;
6029
6030 switch (pipe) {
6031 case 0:
6032 return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
6033 case 1:
6034 return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
6035 case 2:
6036 return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
6037 case 3:
6038 return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
6039 default:
6040 return 0;
6041 }
6042 }
6043
6044 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6045 struct amdgpu_irq_src *source,
6046 unsigned type,
6047 enum amdgpu_interrupt_state state)
6048 {
6049 u32 cp_int_cntl_reg, cp_int_cntl;
6050 int i, j;
6051
6052 switch (state) {
6053 case AMDGPU_IRQ_STATE_DISABLE:
6054 case AMDGPU_IRQ_STATE_ENABLE:
6055 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6056 PRIV_REG_INT_ENABLE,
6057 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6058 for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6059 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6060 /* MECs start at 1 */
6061 cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6062
6063 if (cp_int_cntl_reg) {
6064 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6065 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6066 PRIV_REG_INT_ENABLE,
6067 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6068 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6069 }
6070 }
6071 }
6072 break;
6073 default:
6074 break;
6075 }
6076
6077 return 0;
6078 }
6079
6080 static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
6081 struct amdgpu_irq_src *source,
6082 unsigned type,
6083 enum amdgpu_interrupt_state state)
6084 {
6085 u32 cp_int_cntl_reg, cp_int_cntl;
6086 int i, j;
6087
6088 switch (state) {
6089 case AMDGPU_IRQ_STATE_DISABLE:
6090 case AMDGPU_IRQ_STATE_ENABLE:
6091 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6092 OPCODE_ERROR_INT_ENABLE,
6093 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6094 for (i = 0; i < adev->gfx.mec.num_mec; i++) {
6095 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
6096 /* MECs start at 1 */
6097 cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);
6098
6099 if (cp_int_cntl_reg) {
6100 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6101 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6102 OPCODE_ERROR_INT_ENABLE,
6103 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6104 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6105 }
6106 }
6107 }
6108 break;
6109 default:
6110 break;
6111 }
6112
6113 return 0;
6114 }
6115
6116 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6117 struct amdgpu_irq_src *source,
6118 unsigned type,
6119 enum amdgpu_interrupt_state state)
6120 {
6121 switch (state) {
6122 case AMDGPU_IRQ_STATE_DISABLE:
6123 case AMDGPU_IRQ_STATE_ENABLE:
6124 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6125 PRIV_INSTR_INT_ENABLE,
6126 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
6127 break;
6128 default:
6129 break;
6130 }
6131
6132 return 0;
6133 }
6134
6135 #define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
6136 WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6137 CP_ECC_ERROR_INT_ENABLE, 1)
6138
6139 #define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
6140 WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
6141 CP_ECC_ERROR_INT_ENABLE, 0)
6142
6143 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
6144 struct amdgpu_irq_src *source,
6145 unsigned type,
6146 enum amdgpu_interrupt_state state)
6147 {
6148 switch (state) {
6149 case AMDGPU_IRQ_STATE_DISABLE:
6150 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6151 CP_ECC_ERROR_INT_ENABLE, 0);
6152 DISABLE_ECC_ON_ME_PIPE(1, 0);
6153 DISABLE_ECC_ON_ME_PIPE(1, 1);
6154 DISABLE_ECC_ON_ME_PIPE(1, 2);
6155 DISABLE_ECC_ON_ME_PIPE(1, 3);
6156 break;
6157
6158 case AMDGPU_IRQ_STATE_ENABLE:
6159 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
6160 CP_ECC_ERROR_INT_ENABLE, 1);
6161 ENABLE_ECC_ON_ME_PIPE(1, 0);
6162 ENABLE_ECC_ON_ME_PIPE(1, 1);
6163 ENABLE_ECC_ON_ME_PIPE(1, 2);
6164 ENABLE_ECC_ON_ME_PIPE(1, 3);
6165 break;
6166 default:
6167 break;
6168 }
6169
6170 return 0;
6171 }
6172 
6174 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
6175 struct amdgpu_irq_src *src,
6176 unsigned type,
6177 enum amdgpu_interrupt_state state)
6178 {
6179 switch (type) {
6180 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
6181 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
6182 break;
6183 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
6184 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
6185 break;
6186 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
6187 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
6188 break;
6189 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
6190 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
6191 break;
6192 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
6193 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
6194 break;
6195 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
6196 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
6197 break;
6198 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
6199 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
6200 break;
6201 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
6202 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
6203 break;
6204 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
6205 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
6206 break;
6207 default:
6208 break;
6209 }
6210 return 0;
6211 }
6212
6213 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
6214 struct amdgpu_irq_src *source,
6215 struct amdgpu_iv_entry *entry)
6216 {
6217 int i;
6218 u8 me_id, pipe_id, queue_id;
6219 struct amdgpu_ring *ring;
6220
6221 DRM_DEBUG("IH: CP EOP\n");
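	/* ring_id encodes the pipe in bits [1:0], the me in [3:2] and the
	 * queue in [6:4].
	 */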
6222 me_id = (entry->ring_id & 0x0c) >> 2;
6223 pipe_id = (entry->ring_id & 0x03) >> 0;
6224 queue_id = (entry->ring_id & 0x70) >> 4;
6225
6226 switch (me_id) {
6227 case 0:
6228 if (adev->gfx.num_gfx_rings) {
6229 if (!adev->gfx.mcbp) {
6230 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6231 } else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
6232 				/* Fence signals are handled on the software rings */
6233 for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
6234 amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
6235 }
6236 }
6237 break;
6238 case 1:
6239 case 2:
6240 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6241 ring = &adev->gfx.compute_ring[i];
6242 /* Per-queue interrupt is supported for MEC starting from VI.
6243 * The interrupt can only be enabled/disabled per pipe instead of per queue.
6244 */
6245 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
6246 amdgpu_fence_process(ring);
6247 }
6248 break;
6249 }
6250 return 0;
6251 }
6252
6253 static void gfx_v9_0_fault(struct amdgpu_device *adev,
6254 struct amdgpu_iv_entry *entry)
6255 {
6256 u8 me_id, pipe_id, queue_id;
6257 struct amdgpu_ring *ring;
6258 int i;
6259
6260 me_id = (entry->ring_id & 0x0c) >> 2;
6261 pipe_id = (entry->ring_id & 0x03) >> 0;
6262 queue_id = (entry->ring_id & 0x70) >> 4;
6263
6264 switch (me_id) {
6265 case 0:
6266 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6267 break;
6268 case 1:
6269 case 2:
6270 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6271 ring = &adev->gfx.compute_ring[i];
6272 if (ring->me == me_id && ring->pipe == pipe_id &&
6273 ring->queue == queue_id)
6274 drm_sched_fault(&ring->sched);
6275 }
6276 break;
6277 }
6278 }
6279
6280 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
6281 struct amdgpu_irq_src *source,
6282 struct amdgpu_iv_entry *entry)
6283 {
6284 DRM_ERROR("Illegal register access in command stream\n");
6285 gfx_v9_0_fault(adev, entry);
6286 return 0;
6287 }
6288
6289 static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
6290 struct amdgpu_irq_src *source,
6291 struct amdgpu_iv_entry *entry)
6292 {
6293 DRM_ERROR("Illegal opcode in command stream\n");
6294 gfx_v9_0_fault(adev, entry);
6295 return 0;
6296 }
6297
6298 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
6299 struct amdgpu_irq_src *source,
6300 struct amdgpu_iv_entry *entry)
6301 {
6302 DRM_ERROR("Illegal instruction in command stream\n");
6303 gfx_v9_0_fault(adev, entry);
6304 return 0;
6305 }
6306
6307
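/*
 * RAS EDC counter map: { name, counter register, SEC count mask/shift,
 * DED count mask/shift }. Entries with zeroed DED fields are
 * detect-only (SED) counters that contribute to the CE count only.
 */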
6308 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
6309 { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
6310 SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
6311 SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
6312 },
6313 { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
6314 SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
6315 SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
6316 },
6317 { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6318 SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
6319 0, 0
6320 },
6321 { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
6322 SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
6323 0, 0
6324 },
6325 { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
6326 SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
6327 SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
6328 },
6329 { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6330 SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
6331 0, 0
6332 },
6333 { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
6334 SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
6335 SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
6336 },
6337 { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
6338 SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
6339 SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
6340 },
6341 { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
6342 SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
6343 0, 0
6344 },
6345 { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
6346 SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
6347 0, 0
6348 },
6349 { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
6350 SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
6351 0, 0
6352 },
6353 { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6354 SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
6355 SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
6356 },
6357 { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6358 SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
6359 0, 0
6360 },
6361 { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6362 SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6363 SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6364 },
6365 { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6366 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6367 SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6368 SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6369 },
6370 { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6371 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6372 SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6373 0, 0
6374 },
6375 { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6376 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6377 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6378 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6379 },
6380 { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6381 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6382 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6383 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6384 },
6385 { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6386 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6387 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6388 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6389 },
6390 { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6391 SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6392 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6393 SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6394 },
6395 { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6396 SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6397 0, 0
6398 },
6399 { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6400 SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6401 SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6402 },
6403 { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6404 SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6405 0, 0
6406 },
6407 { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6408 SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6409 0, 0
6410 },
6411 { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6412 SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6413 0, 0
6414 },
6415 { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6416 SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6417 0, 0
6418 },
6419 { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6420 SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6421 0, 0
6422 },
6423 { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6424 SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6425 0, 0
6426 },
6427 { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6428 SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6429 SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6430 },
6431 { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6432 SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6433 SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6434 },
6435 { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6436 SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6437 SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6438 },
6439 { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6440 SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6441 SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6442 },
6443 { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6444 SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6445 SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6446 },
6447 { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6448 SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6449 0, 0
6450 },
6451 { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6452 SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6453 0, 0
6454 },
6455 { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6456 SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6457 0, 0
6458 },
6459 { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6460 SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6461 0, 0
6462 },
6463 { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6464 SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6465 0, 0
6466 },
6467 { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6468 SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6469 0, 0
6470 },
6471 { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6472 SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6473 0, 0
6474 },
6475 { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6476 SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6477 0, 0
6478 },
6479 { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6480 SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6481 0, 0
6482 },
6483 { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6484 SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6485 0, 0
6486 },
6487 { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6488 SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6489 0, 0
6490 },
6491 { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6492 SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6493 0, 0
6494 },
6495 { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6496 SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6497 0, 0
6498 },
6499 { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6500 SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6501 0, 0
6502 },
6503 { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6504 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6505 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6506 },
6507 { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6508 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6509 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6510 },
6511 { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6512 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6513 0, 0
6514 },
6515 { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6516 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6517 0, 0
6518 },
6519 { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6520 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6521 0, 0
6522 },
6523 { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6524 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6525 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6526 },
6527 { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6528 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6529 SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6530 },
6531 { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6532 SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6533 SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6534 },
6535 { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6536 SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6537 SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6538 },
6539 { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6540 SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6541 0, 0
6542 },
6543 { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6544 SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6545 SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6546 },
6547 { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6548 SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6549 SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6550 },
6551 { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6552 SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6553 SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6554 },
6555 { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6556 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6557 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6558 },
6559 { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6560 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6561 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6562 },
6563 { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6564 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6565 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6566 },
6567 { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6568 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6569 SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6570 },
6571 { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6572 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6573 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6574 },
6575 { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6576 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6577 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6578 },
6579 { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6580 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6581 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6582 },
6583 { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6584 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6585 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6586 },
6587 { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6588 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6589 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6590 },
6591 { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6592 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6593 SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6594 },
6595 { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6596 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6597 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6598 },
6599 { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6600 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6601 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6602 },
6603 { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6604 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6605 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6606 },
6607 { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6608 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6609 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6610 },
6611 { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6612 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6613 0, 0
6614 },
6615 { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6616 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6617 0, 0
6618 },
6619 { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6620 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6621 0, 0
6622 },
6623 { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6624 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6625 0, 0
6626 },
6627 { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6628 SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6629 0, 0
6630 },
6631 { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6632 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6633 SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6634 },
6635 { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6636 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6637 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6638 },
6639 { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6640 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6641 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6642 },
6643 { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6644 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6645 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6646 },
6647 { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6648 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6649 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6650 },
6651 { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6652 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6653 0, 0
6654 },
6655 { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6656 SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6657 0, 0
6658 },
6659 { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6660 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6661 0, 0
6662 },
6663 { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6664 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6665 0, 0
6666 },
6667 { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6668 SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6669 0, 0
6670 },
6671 { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6672 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6673 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6674 },
6675 { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6676 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6677 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6678 },
6679 { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6680 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6681 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6682 },
6683 { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6684 SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6685 SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6686 },
6687 { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6688 SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6689 SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6690 },
6691 { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6692 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6693 0, 0
6694 },
6695 { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6696 SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6697 0, 0
6698 },
6699 { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6700 SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6701 0, 0
6702 },
6703 { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6704 SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6705 0, 0
6706 },
6707 { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6708 SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6709 0, 0
6710 },
6711 { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6712 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6713 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6714 },
6715 { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6716 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6717 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6718 },
6719 { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6720 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6721 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6722 },
6723 { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6724 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6725 0, 0
6726 },
6727 { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6728 SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6729 0, 0
6730 },
6731 { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6732 SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6733 0, 0
6734 },
6735 { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6736 SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6737 0, 0
6738 },
6739 { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6740 SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6741 0, 0
6742 },
6743 { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6744 SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6745 0, 0
6746 }
6747 };
6748
6749 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6750 void *inject_if, uint32_t instance_mask)
6751 {
6752 struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6753 int ret;
6754 struct ta_ras_trigger_error_input block_info = { 0 };
6755
6756 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6757 return -EINVAL;
6758
6759 if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6760 return -EINVAL;
6761
6762 if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6763 return -EPERM;
6764
6765 if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6766 info->head.type)) {
6767 		DRM_ERROR("GFX Subblock %s: hardware does not support type 0x%x\n",
6768 ras_gfx_subblocks[info->head.sub_block_index].name,
6769 info->head.type);
6770 return -EPERM;
6771 }
6772
6773 if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6774 info->head.type)) {
6775 		DRM_ERROR("GFX Subblock %s: driver does not support type 0x%x\n",
6776 ras_gfx_subblocks[info->head.sub_block_index].name,
6777 info->head.type);
6778 return -EPERM;
6779 }
6780
6781 block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6782 block_info.sub_block_index =
6783 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6784 block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6785 block_info.address = info->address;
6786 block_info.value = info->value;
6787
6788 mutex_lock(&adev->grbm_idx_mutex);
6789 ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
6790 mutex_unlock(&adev->grbm_idx_mutex);
6791
6792 return ret;
6793 }
6794
6795 static const char * const vml2_mems[] = {
6796 "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6797 "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6798 "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6799 "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6800 "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6801 "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6802 "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6803 "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6804 "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6805 "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6806 "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6807 "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6808 "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6809 "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6810 "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6811 "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6812 };
6813
6814 static const char * const vml2_walker_mems[] = {
6815 "UTC_VML2_CACHE_PDE0_MEM0",
6816 "UTC_VML2_CACHE_PDE0_MEM1",
6817 "UTC_VML2_CACHE_PDE1_MEM0",
6818 "UTC_VML2_CACHE_PDE1_MEM1",
6819 "UTC_VML2_CACHE_PDE2_MEM0",
6820 "UTC_VML2_CACHE_PDE2_MEM1",
6821 "UTC_VML2_RDIF_LOG_FIFO",
6822 };
6823
6824 static const char * const atc_l2_cache_2m_mems[] = {
6825 "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6826 "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6827 "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6828 "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6829 };
6830
6831 static const char * const atc_l2_cache_4k_mems[] = {
6832 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6833 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6834 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6835 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6836 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6837 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6838 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6839 "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6840 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6841 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6842 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6843 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6844 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6845 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6846 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6847 "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6848 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6849 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6850 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6851 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6852 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6853 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6854 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6855 "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6856 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6857 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6858 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6859 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6860 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6861 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6862 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6863 "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6864 };
6865
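/*
 * Query the UTC (VML2/ATC L2) EDC status: reset each block's
 * INDEX/CNT register pair, then walk every memory instance, folding
 * SEC counts into the correctable and DED counts into the
 * uncorrectable error totals.
 */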
6866 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6867 struct ras_err_data *err_data)
6868 {
6869 uint32_t i, data;
6870 uint32_t sec_count, ded_count;
6871
6872 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6873 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6874 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6875 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6876 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6877 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6878 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6879 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6880
6881 for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6882 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6883 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6884
6885 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6886 if (sec_count) {
6887 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6888 "SEC %d\n", i, vml2_mems[i], sec_count);
6889 err_data->ce_count += sec_count;
6890 }
6891
6892 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6893 if (ded_count) {
6894 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6895 "DED %d\n", i, vml2_mems[i], ded_count);
6896 err_data->ue_count += ded_count;
6897 }
6898 }
6899
6900 for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6901 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6902 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6903
6904 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6905 SEC_COUNT);
6906 if (sec_count) {
6907 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6908 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6909 err_data->ce_count += sec_count;
6910 }
6911
6912 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6913 DED_COUNT);
6914 if (ded_count) {
6915 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6916 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6917 err_data->ue_count += ded_count;
6918 }
6919 }
6920
6921 for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6922 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6923 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6924
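		/* raw masks: SEC is in bits [14:13] and DED in bits [16:15]
		 * of the EDC_CNT value, here and in the 4K loop below.
		 */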
6925 sec_count = (data & 0x00006000L) >> 0xd;
6926 if (sec_count) {
6927 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6928 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6929 sec_count);
6930 err_data->ce_count += sec_count;
6931 }
6932 }
6933
6934 for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6935 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6936 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6937
6938 sec_count = (data & 0x00006000L) >> 0xd;
6939 if (sec_count) {
6940 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6941 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6942 sec_count);
6943 err_data->ce_count += sec_count;
6944 }
6945
6946 ded_count = (data & 0x00018000L) >> 0xf;
6947 if (ded_count) {
6948 dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6949 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6950 ded_count);
6951 err_data->ue_count += ded_count;
6952 }
6953 }
6954
6955 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6956 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6957 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6958 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6959
6960 return 0;
6961 }
6962
6963 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6964 const struct soc15_reg_entry *reg,
6965 uint32_t se_id, uint32_t inst_id, uint32_t value,
6966 uint32_t *sec_count, uint32_t *ded_count)
6967 {
6968 uint32_t i;
6969 uint32_t sec_cnt, ded_cnt;
6970
6971 for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6972 		if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6973 gfx_v9_0_ras_fields[i].seg != reg->seg ||
6974 gfx_v9_0_ras_fields[i].inst != reg->inst)
6975 continue;
6976
6977 sec_cnt = (value &
6978 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6979 gfx_v9_0_ras_fields[i].sec_count_shift;
6980 if (sec_cnt) {
6981 dev_info(adev->dev, "GFX SubBlock %s, "
6982 "Instance[%d][%d], SEC %d\n",
6983 gfx_v9_0_ras_fields[i].name,
6984 se_id, inst_id,
6985 sec_cnt);
6986 *sec_count += sec_cnt;
6987 }
6988
6989 ded_cnt = (value &
6990 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6991 gfx_v9_0_ras_fields[i].ded_count_shift;
6992 if (ded_cnt) {
6993 dev_info(adev->dev, "GFX SubBlock %s, "
6994 "Instance[%d][%d], DED %d\n",
6995 gfx_v9_0_ras_fields[i].name,
6996 se_id, inst_id,
6997 ded_cnt);
6998 *ded_count += ded_cnt;
6999 }
7000 }
7001
7002 return 0;
7003 }
7004
7005 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
7006 {
7007 int i, j, k;
7008
7009 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7010 return;
7011
7012 /* read back registers to clear the counters */
7013 mutex_lock(&adev->grbm_idx_mutex);
7014 for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7015 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7016 for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7017 amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
7018 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7019 }
7020 }
7021 }
7022 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
7023 mutex_unlock(&adev->grbm_idx_mutex);
7024
7025 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7026 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
7027 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7028 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
7029 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7030 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
7031 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7032 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
7033
7034 for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
7035 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
7036 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
7037 }
7038
7039 for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
7040 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
7041 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
7042 }
7043
7044 for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
7045 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
7046 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
7047 }
7048
7049 for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
7050 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
7051 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
7052 }
7053
7054 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
7055 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
7056 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
7057 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
7058 }
7059
7060 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
7061 void *ras_error_status)
7062 {
7063 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
7064 uint32_t sec_count = 0, ded_count = 0;
7065 uint32_t i, j, k;
7066 uint32_t reg_value;
7067
7068 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
7069 return;
7070
7071 err_data->ue_count = 0;
7072 err_data->ce_count = 0;
7073
7074 mutex_lock(&adev->grbm_idx_mutex);
7075
7076 for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
7077 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
7078 for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
7079 amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
7080 reg_value =
7081 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
7082 if (reg_value)
7083 gfx_v9_0_ras_error_count(adev,
7084 &gfx_v9_0_edc_counter_regs[i],
7085 j, k, reg_value,
7086 &sec_count, &ded_count);
7087 }
7088 }
7089 }
7090
7091 err_data->ce_count += sec_count;
7092 err_data->ue_count += ded_count;
7093
7094 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
7095 mutex_unlock(&adev->grbm_idx_mutex);
7096
7097 gfx_v9_0_query_utc_edc_status(adev, err_data);
7098 }
7099
7100 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
7101 uint32_t pipe, bool enable)
7102 {
7103 struct amdgpu_device *adev = ring->adev;
7104 uint32_t val;
7105 uint32_t wcl_cs_reg;
7106
7107 	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
7108 val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
7109
7110 switch (pipe) {
7111 case 0:
7112 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
7113 break;
7114 case 1:
7115 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
7116 break;
7117 case 2:
7118 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
7119 break;
7120 case 3:
7121 wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
7122 break;
7123 default:
7124 DRM_DEBUG("invalid pipe %d\n", pipe);
7125 return;
7126 }
7127
7128 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
7129 }
7130 
7131 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
7132 {
7133 struct amdgpu_device *adev = ring->adev;
7134 uint32_t val;
7135 int i;
7136 
7138 	/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
7139 	 * limit the number of gfx waves. Setting it to 0x1f (5 bits set)
7140 	 * makes sure gfx gets only around 25% of GPU resources.
7141 	 */
7142 val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
7143 amdgpu_ring_emit_wreg(ring,
7144 SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
7145 val);
7146
7147 	/* Restrict waves for normal/low priority compute queues as well
7148 	 * to get the best QoS for high priority compute jobs.
7149 	 *
7150 	 * amdgpu controls only the first ME (CS pipes 0-3).
7151 	 */
7152 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
7153 if (i != ring->pipe)
7154 gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
7156 	}
7157 }
7158
7159 static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
7160 {
7161 /* Header itself is a NOP packet */
7162 if (num_nop == 1) {
7163 amdgpu_ring_write(ring, ring->funcs->nop);
7164 return;
7165 }
7166
7167 	/* Max HW optimization up to 0x3ffe, then the remaining NOPs one at a time */
7168 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
7169
7170 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
7171 amdgpu_ring_insert_nop(ring, num_nop - 1);
7172 }
7173
7174 static void gfx_v9_0_ring_emit_wreg_me(struct amdgpu_ring *ring,
7175 uint32_t reg,
7176 uint32_t val)
7177 {
7178 uint32_t cmd = 0;
7179
7180 switch (ring->funcs->type) {
7181 case AMDGPU_RING_TYPE_KIQ:
7182 cmd = (1 << 16); /* no inc addr */
7183 break;
7184 default:
7185 cmd = WR_CONFIRM;
7186 break;
7187 }
7188 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
7189 amdgpu_ring_write(ring, cmd);
7190 amdgpu_ring_write(ring, reg);
7191 amdgpu_ring_write(ring, 0);
7192 amdgpu_ring_write(ring, val);
7193 }
7194
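/*
 * Reset a gfx (KGQ) ring: request a per-VMID CP reset through the KIQ,
 * then emit a trailing fence + wait pair around clearing
 * mmCP_VMID_RESET and re-enabling the legacy pipeline, and finally
 * verify the ring is alive again with a ring test.
 */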
7195 static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring,
7196 unsigned int vmid,
7197 struct amdgpu_fence *timedout_fence)
7198 {
7199 struct amdgpu_device *adev = ring->adev;
7200 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
7201 struct amdgpu_ring *kiq_ring = &kiq->ring;
7202 unsigned long flags;
7203 u32 tmp;
7204 int r;
7205
7206 amdgpu_ring_reset_helper_begin(ring, timedout_fence);
7207
7208 spin_lock_irqsave(&kiq->ring_lock, flags);
7209
7210 if (amdgpu_ring_alloc(kiq_ring, 5)) {
7211 spin_unlock_irqrestore(&kiq->ring_lock, flags);
7212 return -ENOMEM;
7213 }
7214
7215 /* send the reset - 5 */
7216 tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
7217 gfx_v9_0_ring_emit_wreg(kiq_ring,
7218 SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
7219 amdgpu_ring_commit(kiq_ring);
7220 r = amdgpu_ring_test_ring(kiq_ring);
7221 spin_unlock_irqrestore(&kiq->ring_lock, flags);
7222 if (r)
7223 return r;
7224
7225 if (amdgpu_ring_alloc(ring, 8 + 7 + 5 + 2 + 8 + 7))
7226 return -ENOMEM;
7227 /* emit the fence to finish the reset - 8 */
7228 ring->trail_seq++;
7229 gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
7230 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
7231 /* wait for the fence - 7 */
7232 gfx_v9_0_wait_reg_mem(ring, 0, 1, 0,
7233 lower_32_bits(ring->trail_fence_gpu_addr),
7234 upper_32_bits(ring->trail_fence_gpu_addr),
7235 ring->trail_seq, 0xffffffff, 4);
7236 /* clear mmCP_VMID_RESET - 5 */
7237 gfx_v9_0_ring_emit_wreg_me(ring,
7238 SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
7239 /* event write ENABLE_LEGACY_PIPELINE - 2 */
7240 gfx_v9_0_ring_emit_event_write(ring, ENABLE_LEGACY_PIPELINE, 0);
7241 /* emit a regular fence - 8 */
7242 ring->trail_seq++;
7243 gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
7244 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
7245 /* wait for the fence - 7 */
7246 gfx_v9_0_wait_reg_mem(ring, 1, 1, 0,
7247 lower_32_bits(ring->trail_fence_gpu_addr),
7248 upper_32_bits(ring->trail_fence_gpu_addr),
7249 ring->trail_seq, 0xffffffff, 4);
7250 amdgpu_ring_commit(ring);
7251 /* wait for the commands to complete */
7252 r = amdgpu_ring_test_ring(ring);
7253 if (r)
7254 return r;
7255
7256 return amdgpu_ring_reset_helper_end(ring, timedout_fence);
7257 }
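
/* Budget check (illustrative): the per-step dword counts in the comments
 * above - 8 (fence) + 7 (wait) + 5 (wreg) + 2 (event) + 8 (fence) +
 * 7 (wait) - sum to 37 dwords, exactly the amount reserved by the
 * amdgpu_ring_alloc(ring, 8 + 7 + 5 + 2 + 8 + 7) call.
 */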

static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
			      unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;
	int i, r;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
				   0, 0);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r)
		return r;

	/* make sure the dequeue is complete */
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	if (r) {
		dev_err(adev->dev, "failed to wait for hqd deactivation\n");
		return r;
	}

	r = gfx_v9_0_kcq_init_queue(ring, true);
	if (r) {
		dev_err(adev->dev, "failed to init the kcq\n");
		return r;
	}
	spin_lock_irqsave(&kiq->ring_lock, flags);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_map_queues(kiq_ring, ring);
	amdgpu_ring_commit(kiq_ring);
	r = amdgpu_ring_test_ring(kiq_ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
	if (r) {
		DRM_ERROR("failed to remap the queue\n");
		return r;
	}
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
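
/* Recovery sequence summary (reader's note, not from the original
 * source): the KIQ first unmaps the hung queue, we then poll
 * mmCP_HQD_ACTIVE under srbm_mutex until the HQD goes idle, reinitialize
 * the queue's MQD via gfx_v9_0_kcq_init_queue(), and finally ask the KIQ
 * to map the queue back before signalling the timed-out fence through
 * the reset helper.
 */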

static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_9[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
						drm_printf(p, "%-50s \t 0x%08x\n",
							   "mmCP_MEC_ME2_HEADER_DUMP",
							   adev->gfx.ip_dump_compute_queues[index + reg]);
					else
						drm_printf(p, "%-50s \t 0x%08x\n",
							   gc_cp_reg_list_9[reg].reg_name,
							   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}
}

static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);

	if (!adev->gfx.ip_dump_core || !adev->gfx.num_gfx_rings)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < reg_count; i++)
		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_9[i]));
	amdgpu_gfx_off_ctrl(adev, true);

	/* dump compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				/* ME0 is for GFX so start from 1 for CP */
				soc15_grbm_select(adev, 1 + i, j, k, 0, 0);

				for (reg = 0; reg < reg_count; reg++) {
					if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
						adev->gfx.ip_dump_compute_queues[index + reg] =
							RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
					else
						adev->gfx.ip_dump_compute_queues[index + reg] =
							RREG32(SOC15_REG_ENTRY_OFFSET(
								gc_cp_reg_list_9[reg]));
				}
				index += reg_count;
			}
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_off_ctrl(adev, true);
}

static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Emit the cleaner shader */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
	else
		amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));

	amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
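
/* Size note (illustrative): the packet above is two dwords (header plus
 * one reserved dword), which is what the trailing "2" accounts for in
 * each emit_frame_size tally below.
 */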

static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ip_block *gfx_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);

	/* Raven and PCO APUs seem to have stability issues with compute,
	 * gfxoff and gfx pg. Disable gfx pg during submission and allow
	 * it again afterwards.
	 */
	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
}

static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ip_block *gfx_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

	/* Raven and PCO APUs seem to have stability issues with compute,
	 * gfxoff and gfx pg. Disable gfx pg during submission and allow
	 * it again afterwards.
	 */
	if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
		gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);

	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
	.dump_ip_state = gfx_v9_ip_dump,
	.print_ip_state = gfx_v9_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		13 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2 + /* SWITCH_BUFFER */
		7 + /* gfx_v9_0_emit_mem_sync */
		2, /* gfx_v9_0_ring_emit_cleaner_shader */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = gfx_v9_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.preempt_ib = gfx_v9_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
	.reset = gfx_v9_0_reset_kgq,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
	.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
	.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		13 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2 + /* SWITCH_BUFFER */
		7 + /* gfx_v9_0_emit_mem_sync */
		2, /* gfx_v9_0_ring_emit_cleaner_shader */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = gfx_v9_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
	.patch_cntl = gfx_v9_0_ring_patch_cntl,
	.patch_de = gfx_v9_0_ring_patch_de_meta,
	.patch_ce = gfx_v9_0_ring_patch_ce_meta,
	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		9 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_0_emit_mem_sync */
		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
		15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
		2, /* gfx_v9_0_ring_emit_cleaner_shader */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = gfx_v9_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
	.reset = gfx_v9_0_reset_kcq,
	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
	.begin_use = gfx_v9_0_ring_begin_use_compute,
	.end_use = gfx_v9_0_ring_end_use_compute,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
		for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
			adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
	.set = gfx_v9_0_set_bad_op_fault_state,
	.process = gfx_v9_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v9_0_set_cp_ecc_error_state,
	.process = amdgpu_gfx_cp_ecc_error_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
		adev->gds.gds_size = 0x10000;
		break;
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 4, 1):
		adev->gds.gds_size = 0x1000;
		break;
	case IP_VERSION(9, 4, 2):
		/* aldebaran removed all the GDS internal memory;
		 * only the GWS opcodes (e.g. barrier, semaphore)
		 * are supported in the kernel.
		 */
		adev->gds.gds_size = 0;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 4, 0):
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	case IP_VERSION(9, 2, 1):
		adev->gds.gds_compute_max_wave_id = 0x27f;
		break;
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
		break;
	case IP_VERSION(9, 4, 1):
		adev->gds.gds_compute_max_wave_id = 0xfff;
		break;
	case IP_VERSION(9, 4, 2):
		/* deprecated for Aldebaran, no usage at all */
		adev->gds.gds_compute_max_wave_id = 0;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
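
/* Worked example (illustrative numbers): the two reads OR together the
 * fuse-level and user-level inactive-CU bits. With max_cu_per_sh == 14
 * the mask is 0x3fff; if CUs 12 and 13 are marked inactive (data ==
 * 0x3000 after shifting), the function returns ~0x3000 & 0x3fff ==
 * 0x0fff, i.e. one set bit per active CU.
 */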

static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 4];

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from the bitmap array size 4*4, which covers all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
		adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(adev, disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
			gfx_v9_0_set_user_cu_inactive_bitmap(
				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);

			/*
			 * The bitmap (and ao_cu_bitmap) in the cu_info structure
			 * is a 4x4 array, which suits the Vega ASICs with their
			 * 4*2 SE/SH layout.
			 * For Arcturus, the SE/SH layout changed to 8*1. To
			 * minimize the impact, we keep it compatible with the
			 * current bitmap array as below:
			 * SE4,SH0 --> bitmap[0][1]
			 * SE5,SH0 --> bitmap[1][1]
			 * SE6,SH0 --> bitmap[2][1]
			 * SE7,SH0 --> bitmap[3][1]
			 */
			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
		}
	}
	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};