/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_cleaner_shader.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

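/*
 * These GC registers are missing from the gc_11_0_0 register headers, so
 * their dword offsets are defined here locally.  Per the usual SOC15
 * convention (see SOC15_REG_OFFSET()), the absolute MMIO offset is
 * adev->reg_offset[GC_HWIP][inst][BASE_IDX] + offset.
 */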
#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1		0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
#define regPC_CONFIG_CNTL_1		0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX	1
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_rlc.bin");

static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT3),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HPD_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB0_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB1_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB2_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regIA_UTCL1_STATUS_2),
	SOC15_REG_ENTRY_STR(GC, 0, regPA_CL_CNTL_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_CACHES),
	SOC15_REG_ENTRY_STR(GC, 0, regSQG_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_DEBUG_INTERRUPT_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	/* cp header registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE4),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE5)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
	/* compute registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
};

static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
	/* gfx queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_CSMD_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_MAPPED),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_QUE_MGR_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_CONTROL0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_HQD_HQ_STATUS0),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_MQD_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_RB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};

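/*
 * Golden-register programming is a read-modify-write: in each entry the
 * AND mask selects the bits to change and the OR value supplies their new
 * contents (a sketch of soc15_program_register_sequence()'s semantics).
 */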
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

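/*
 * gfx11_kiq_set_resources - tell the KIQ which compute queues it owns
 *
 * Emits a SET_RESOURCES packet carrying the queue mask plus the cleaner
 * shader MC address; the >> 8 below converts the GPU address into the
 * 256-byte-granular MC address the packet expects.
 */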
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

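/*
 * gfx11_kiq_map_queues - ask the KIQ to map a ring's hardware queue
 *
 * The ME/engine-select pair identifies the queue type: compute is
 * me=1/eng_sel=0, gfx is me=0/eng_sel=4 and MES is me=2/eng_sel=5, per
 * the switch below.
 */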
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME(me) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};
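
/*
 * The *_size fields are the dword counts (header plus payload) of the
 * packets the helpers above emit; generic KIQ code uses them to size its
 * ring allocations before invoking the helpers.
 */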

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	soc15_program_register_sequence(adev,
					golden_settings_gc_11_0,
					(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

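/*
 * A type-3 PM4 header with count N is followed by N + 1 payload dwords,
 * so a single PACKET3_NOP with count (num_nop - 2) makes the CP skip the
 * num_nop - 1 filler dwords that follow it; the count is capped at
 * 0x3ffe, the largest value usable in the 14-bit count field here.
 */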
static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
	/* Header itself is a NOP packet */
	if (num_nop == 1) {
		amdgpu_ring_write(ring, ring->funcs->nop);
		return;
	}

	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

	/* Header is at index 0, followed by num_nop - 1 NOP packets */
	amdgpu_ring_insert_nop(ring, num_nop - 1);
}

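/*
 * gfx_v11_0_ring_test_ring - basic ring liveness test
 *
 * Seeds SCRATCH_REG0 with 0xCAFEDEAD, asks the ring to overwrite it with
 * 0xDEADBEEF (RLC wreg on the KIQ, SET_UCONFIG_REG elsewhere), then polls
 * the register until the value lands or adev->usec_timeout expires.
 */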
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't support indirect buffers yet */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
				   "amdgpu/%s_toc.bin", ucode_prefix);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				   le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

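/*
 * CP gfx shadowing is only reported as available for SR-IOV VFs on gfx
 * 11.0.0/11.0.2/11.0.3, and only once the ME/PFP/MEC firmware is new
 * enough to carry the feature (the version floors checked below).
 */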
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512)) {
			if (amdgpu_sriov_vf(adev))
				adev->gfx.cp_gfx_shadow = true;
			else
				adev->gfx.cp_gfx_shadow = false;
		}
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}

static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[25];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
				   "amdgpu/%s_pfp.bin", ucode_prefix);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enabled\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
				   "amdgpu/%s_me.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
		    adev->pdev->revision == 0xCE)
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   "amdgpu/gc_11_0_0_rlc_1.bin");
		else
			err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
						   "amdgpu/%s_rlc.bin", ucode_prefix);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", ucode_prefix);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);

	if (adev->gfx.imu.funcs && adev->gfx.imu.funcs->init_microcode) {
		err = adev->gfx.imu.funcs->init_microcode(adev);
		if (err)
			DRM_ERROR("Failed to init imu firmware!\n");
		return err;
	}

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

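/*
 * gfx_v11_0_get_csb_buffer - fill the clear-state indirect buffer
 *
 * The dwords written here must line up with the count computed by
 * gfx_v11_0_get_csb_size() above: preamble begin, context control, the
 * SECT_CONTEXT register extents, PA_SC_TILE_STEERING_OVERRIDE, preamble
 * end, and a trailing CLEAR_STATE packet.
 */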
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t *dst,
				     int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256

static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	struct amdgpu_ring *ring;
	unsigned int irq_type;
	unsigned int hw_prio;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_graphics_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

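/*
 * Byte offset and size of each firmware image inside the RLC autoload
 * buffer, indexed by SOC21 firmware ID and filled in from the PSP TOC by
 * gfx_v11_0_parse_rlc_toc() below.
 */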
static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* The TOC may align entry offsets, so the last entry can end beyond
	 * the plain sum of sizes; extend the total to cover it.
	 */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset +
			     rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

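/*
 * gfx_v11_0_rlc_backdoor_autoload_copy_ucode - stage one firmware image
 *
 * Copies @fw_data into the autoload buffer at the offset the TOC assigned
 * to @id, zero-pads up to the TOC-reported size, and sets the matching
 * bit in the 64-bit autoload mask (the RS64 PFP/ME instruction images are
 * deliberately left out of the mask).
 */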
gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device * adev,SOC21_FIRMWARE_ID id,const void * fw_data,uint32_t fw_size,uint32_t * fw_autoload_mask)1225 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
1226 SOC21_FIRMWARE_ID id,
1227 const void *fw_data,
1228 uint32_t fw_size,
1229 uint32_t *fw_autoload_mask)
1230 {
1231 uint32_t toc_offset;
1232 uint32_t toc_fw_size;
1233 char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
1234
1235 if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
1236 return;
1237
1238 toc_offset = rlc_autoload_info[id].offset;
1239 toc_fw_size = rlc_autoload_info[id].size;
1240
1241 if (fw_size == 0)
1242 fw_size = toc_fw_size;
1243
1244 if (fw_size > toc_fw_size)
1245 fw_size = toc_fw_size;
1246
1247 memcpy(ptr + toc_offset, fw_data, fw_size);
1248
1249 if (fw_size < toc_fw_size)
1250 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
1251
1252 if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
1253 *(uint64_t *)fw_autoload_mask |= 1ULL << id;
1254 }
1255
gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device * adev,uint32_t * fw_autoload_mask)1256 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
1257 uint32_t *fw_autoload_mask)
1258 {
1259 void *data;
1260 uint32_t size;
1261 uint64_t *toc_ptr;
1262
1263 *(uint64_t *)fw_autoload_mask |= 0x1;
1264
1265 DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);
1266
1267 data = adev->psp.toc.start_addr;
1268 size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;
1269
1270 toc_ptr = (uint64_t *)data + size / 8 - 1;
1271 *toc_ptr = *(uint64_t *)fw_autoload_mask;
1272
1273 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
1274 data, size, fw_autoload_mask);
1275 }
1276
gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device * adev,uint32_t * fw_autoload_mask)1277 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
1278 uint32_t *fw_autoload_mask)
1279 {
1280 const __le32 *fw_data;
1281 uint32_t fw_size;
1282 const struct gfx_firmware_header_v1_0 *cp_hdr;
1283 const struct gfx_firmware_header_v2_0 *cpv2_hdr;
1284 const struct rlc_firmware_header_v2_0 *rlc_hdr;
1285 const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
1286 uint16_t version_major, version_minor;
1287
1288 if (adev->gfx.rs64_enable) {
1289 /* pfp ucode */
1290 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1291 adev->gfx.pfp_fw->data;
1292 /* instruction */
1293 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1294 le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1295 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1296 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
1297 fw_data, fw_size, fw_autoload_mask);
1298 /* data */
1299 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1300 le32_to_cpu(cpv2_hdr->data_offset_bytes));
1301 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1302 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
1303 fw_data, fw_size, fw_autoload_mask);
1304 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
1305 fw_data, fw_size, fw_autoload_mask);
1306 /* me ucode */
1307 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1308 adev->gfx.me_fw->data;
1309 /* instruction */
1310 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1311 le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1312 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1313 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
1314 fw_data, fw_size, fw_autoload_mask);
1315 /* data */
1316 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1317 le32_to_cpu(cpv2_hdr->data_offset_bytes));
1318 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1319 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
1320 fw_data, fw_size, fw_autoload_mask);
1321 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
1322 fw_data, fw_size, fw_autoload_mask);
1323 /* mec ucode */
1324 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
1325 adev->gfx.mec_fw->data;
1326 /* instruction */
1327 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1328 le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
1329 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1330 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
1331 fw_data, fw_size, fw_autoload_mask);
1332 /* data */
1333 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1334 le32_to_cpu(cpv2_hdr->data_offset_bytes));
1335 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1336 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
1337 fw_data, fw_size, fw_autoload_mask);
1338 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
1339 fw_data, fw_size, fw_autoload_mask);
1340 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
1341 fw_data, fw_size, fw_autoload_mask);
1342 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
1343 fw_data, fw_size, fw_autoload_mask);
1344 } else {
1345 /* pfp ucode */
1346 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1347 adev->gfx.pfp_fw->data;
1348 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
1349 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1350 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1351 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
1352 fw_data, fw_size, fw_autoload_mask);
1353
1354 /* me ucode */
1355 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1356 adev->gfx.me_fw->data;
1357 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
1358 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1359 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
1360 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
1361 fw_data, fw_size, fw_autoload_mask);
1362
1363 /* mec ucode */
1364 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1365 adev->gfx.mec_fw->data;
1366 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1367 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
1368 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
1369 cp_hdr->jt_size * 4;
1370 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
1371 fw_data, fw_size, fw_autoload_mask);
1372 }
1373
1374 /* rlc ucode */
1375 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
1376 adev->gfx.rlc_fw->data;
1377 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1378 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
1379 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
1380 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
1381 fw_data, fw_size, fw_autoload_mask);
1382
1383 version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1384 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1385 if (version_major == 2) {
1386 if (version_minor >= 2) {
1387 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1388
1389 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1390 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
1391 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
1392 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
1393 fw_data, fw_size, fw_autoload_mask);
1394
1395 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1396 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
1397 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
1398 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
1399 fw_data, fw_size, fw_autoload_mask);
1400 }
1401 }
1402 }
1403
gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device * adev,uint32_t * fw_autoload_mask)1404 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
1405 uint32_t *fw_autoload_mask)
1406 {
1407 const __le32 *fw_data;
1408 uint32_t fw_size;
1409 const struct sdma_firmware_header_v2_0 *sdma_hdr;
1410
1411 sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
1412 adev->sdma.instance[0].fw->data;
1413 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1414 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
1415 fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
1416
1417 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1418 SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);
1419
1420 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
1421 le32_to_cpu(sdma_hdr->ctl_ucode_offset));
1422 fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
1423
1424 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1425 SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
1426 }
1427
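/*
 * MES RS64 runs on two pipes; each pipe gets its own ucode image plus a
 * separate data/stack image, so four firmware IDs are staged in total.
 */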
1428 static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
1429 uint32_t *fw_autoload_mask)
1430 {
1431 const __le32 *fw_data;
1432 unsigned fw_size;
1433 const struct mes_firmware_header_v1_0 *mes_hdr;
1434 int pipe, ucode_id, data_id;
1435
1436 for (pipe = 0; pipe < 2; pipe++) {
1437 if (pipe == 0) {
1438 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
1439 data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
1440 } else {
1441 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
1442 data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
1443 }
1444
1445 mes_hdr = (const struct mes_firmware_header_v1_0 *)
1446 adev->mes.fw[pipe]->data;
1447
1448 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1449 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
1450 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
1451
1452 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1453 ucode_id, fw_data, fw_size, fw_autoload_mask);
1454
1455 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
1456 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
1457 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
1458
1459 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
1460 data_id, fw_data, fw_size, fw_autoload_mask);
1461 }
1462 }
1463
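/*
 * Backdoor autoload: the driver copies every GC firmware image into a
 * single GPU buffer, points the IMU bootloader at the RLC_G image, and
 * the IMU/RLC then pull the remaining firmwares from that buffer.
 */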
1464 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
1465 {
1466 uint32_t rlc_g_offset, rlc_g_size;
1467 uint64_t gpu_addr;
1468 uint32_t autoload_fw_id[2];
1469
1470 memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);
1471
1472 /* RLC autoload sequence 2: copy ucode */
1473 gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
1474 gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
1475 gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
1476 gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);
1477
1478 rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
1479 rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
1480 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
1481
1482 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
1483 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));
1484
1485 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);
1486
1487 /* RLC autoload sequence 3: load IMU fw */
1488 if (adev->gfx.imu.funcs->load_microcode)
1489 adev->gfx.imu.funcs->load_microcode(adev);
1490 /* RLC autoload sequence 4 init IMU fw */
1491 if (adev->gfx.imu.funcs->setup_imu)
1492 adev->gfx.imu.funcs->setup_imu(adev);
1493 if (adev->gfx.imu.funcs->start_imu)
1494 adev->gfx.imu.funcs->start_imu(adev);
1495
1496 /* RLC autoload sequence 5 disable gpa mode */
1497 gfx_v11_0_disable_gpa_mode(adev);
1498
1499 return 0;
1500 }
1501
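/*
 * Best-effort allocation of the register snapshot buffers used for IP
 * dumps; on failure the corresponding dump is simply skipped, so no
 * error is returned to the caller.
 */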
1502 static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
1503 {
1504 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
1505 uint32_t *ptr;
1506 uint32_t inst;
1507
1508 ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
1509 if (!ptr) {
1510 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1511 adev->gfx.ip_dump_core = NULL;
1512 } else {
1513 adev->gfx.ip_dump_core = ptr;
1514 }
1515
1516 /* Allocate memory for compute queue registers for all the instances */
1517 reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
1518 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1519 adev->gfx.mec.num_queue_per_pipe;
1520
1521 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1522 if (!ptr) {
1523 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1524 adev->gfx.ip_dump_compute_queues = NULL;
1525 } else {
1526 adev->gfx.ip_dump_compute_queues = ptr;
1527 }
1528
1529 /* Allocate memory for gfx queue registers for all the instances */
1530 reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
1531 inst = adev->gfx.me.num_me * adev->gfx.me.num_pipe_per_me *
1532 adev->gfx.me.num_queue_per_pipe;
1533
1534 ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
1535 if (!ptr) {
1536 DRM_ERROR("Failed to allocate memory for GFX Queues IP Dump\n");
1537 adev->gfx.ip_dump_gfx_queues = NULL;
1538 } else {
1539 adev->gfx.ip_dump_gfx_queues = ptr;
1540 }
1541 }
1542
1543 static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
1544 {
1545 int i, j, k, r, ring_id = 0;
1546 int xcc_id = 0;
1547 struct amdgpu_device *adev = ip_block->adev;
1548
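/* Per-IP-version CP topology: ME/MEC counts, pipes per engine, queues per pipe. */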
1549 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1550 case IP_VERSION(11, 0, 0):
1551 case IP_VERSION(11, 0, 2):
1552 case IP_VERSION(11, 0, 3):
1553 adev->gfx.me.num_me = 1;
1554 adev->gfx.me.num_pipe_per_me = 1;
1555 adev->gfx.me.num_queue_per_pipe = 1;
1556 adev->gfx.mec.num_mec = 2;
1557 adev->gfx.mec.num_pipe_per_mec = 4;
1558 adev->gfx.mec.num_queue_per_pipe = 4;
1559 break;
1560 case IP_VERSION(11, 0, 1):
1561 case IP_VERSION(11, 0, 4):
1562 case IP_VERSION(11, 5, 0):
1563 case IP_VERSION(11, 5, 1):
1564 case IP_VERSION(11, 5, 2):
1565 adev->gfx.me.num_me = 1;
1566 adev->gfx.me.num_pipe_per_me = 1;
1567 adev->gfx.me.num_queue_per_pipe = 1;
1568 adev->gfx.mec.num_mec = 1;
1569 adev->gfx.mec.num_pipe_per_mec = 4;
1570 adev->gfx.mec.num_queue_per_pipe = 4;
1571 break;
1572 default:
1573 adev->gfx.me.num_me = 1;
1574 adev->gfx.me.num_pipe_per_me = 1;
1575 adev->gfx.me.num_queue_per_pipe = 1;
1576 adev->gfx.mec.num_mec = 1;
1577 adev->gfx.mec.num_pipe_per_mec = 4;
1578 adev->gfx.mec.num_queue_per_pipe = 8;
1579 break;
1580 }
1581
1582 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1583 case IP_VERSION(11, 0, 0):
1584 case IP_VERSION(11, 0, 2):
1585 case IP_VERSION(11, 0, 3):
1586 adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
1587 adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
1588 if (adev->gfx.me_fw_version >= 2280 &&
1589 adev->gfx.pfp_fw_version >= 2370 &&
1590 adev->gfx.mec_fw_version >= 2450 &&
1591 adev->mes.fw_version[0] >= 99) {
1592 adev->gfx.enable_cleaner_shader = true;
1593 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1594 if (r) {
1595 adev->gfx.enable_cleaner_shader = false;
1596 dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1597 }
1598 }
1599 break;
1600 default:
1601 adev->gfx.enable_cleaner_shader = false;
1602 break;
1603 }
1604
1605 /* Enable the CG flag in one-VF mode to allow RLC safe-mode enter/exit */
1606 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
1607 amdgpu_sriov_is_pp_one_vf(adev))
1608 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
1609
1610 /* EOP Event */
1611 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1612 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
1613 &adev->gfx.eop_irq);
1614 if (r)
1615 return r;
1616
1617 /* Bad opcode Event */
1618 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1619 GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
1620 &adev->gfx.bad_op_irq);
1621 if (r)
1622 return r;
1623
1624 /* Privileged reg */
1625 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1626 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
1627 &adev->gfx.priv_reg_irq);
1628 if (r)
1629 return r;
1630
1631 /* Privileged inst */
1632 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
1633 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
1634 &adev->gfx.priv_inst_irq);
1635 if (r)
1636 return r;
1637
1638 /* FED error */
1639 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1640 GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
1641 &adev->gfx.rlc_gc_fed_irq);
1642 if (r)
1643 return r;
1644
1645 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1646
1647 gfx_v11_0_me_init(adev);
1648
1649 r = gfx_v11_0_rlc_init(adev);
1650 if (r) {
1651 DRM_ERROR("Failed to init rlc BOs!\n");
1652 return r;
1653 }
1654
1655 r = gfx_v11_0_mec_init(adev);
1656 if (r) {
1657 DRM_ERROR("Failed to init MEC BOs!\n");
1658 return r;
1659 }
1660
1661 /* set up the gfx ring */
1662 for (i = 0; i < adev->gfx.me.num_me; i++) {
1663 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
1664 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1665 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1666 continue;
1667
1668 r = gfx_v11_0_gfx_ring_init(adev, ring_id,
1669 i, k, j);
1670 if (r)
1671 return r;
1672 ring_id++;
1673 }
1674 }
1675 }
1676
1677 ring_id = 0;
1678 /* set up the compute queues - allocate horizontally across pipes */
1679 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1680 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1681 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1682 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
1683 k, j))
1684 continue;
1685
1686 r = gfx_v11_0_compute_ring_init(adev, ring_id,
1687 i, k, j);
1688 if (r)
1689 return r;
1690
1691 ring_id++;
1692 }
1693 }
1694 }
1695
1696 adev->gfx.gfx_supported_reset =
1697 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
1698 adev->gfx.compute_supported_reset =
1699 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1700 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1701 case IP_VERSION(11, 0, 0):
1702 case IP_VERSION(11, 0, 2):
1703 case IP_VERSION(11, 0, 3):
1704 if ((adev->gfx.me_fw_version >= 2280) &&
1705 (adev->gfx.mec_fw_version >= 2410)) {
1706 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1707 adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1708 }
1709 break;
1710 default:
1711 break;
1712 }
1713
1714 if (!adev->enable_mes_kiq) {
1715 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
1716 if (r) {
1717 DRM_ERROR("Failed to init KIQ BOs!\n");
1718 return r;
1719 }
1720
1721 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1722 if (r)
1723 return r;
1724 }
1725
1726 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
1727 if (r)
1728 return r;
1729
1730 /* allocate visible FB for rlc auto-loading fw */
1731 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1732 r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1733 if (r)
1734 return r;
1735 }
1736
1737 r = gfx_v11_0_gpu_early_init(adev);
1738 if (r)
1739 return r;
1740
1741 if (amdgpu_gfx_ras_sw_init(adev)) {
1742 dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
1743 return -EINVAL;
1744 }
1745
1746 gfx_v11_0_alloc_ip_dump(adev);
1747
1748 r = amdgpu_gfx_sysfs_init(adev);
1749 if (r)
1750 return r;
1751
1752 return 0;
1753 }
1754
1755 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1756 {
1757 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1758 &adev->gfx.pfp.pfp_fw_gpu_addr,
1759 (void **)&adev->gfx.pfp.pfp_fw_ptr);
1760
1761 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1762 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1763 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1764 }
1765
1766 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1767 {
1768 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1769 &adev->gfx.me.me_fw_gpu_addr,
1770 (void **)&adev->gfx.me.me_fw_ptr);
1771
1772 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1773 &adev->gfx.me.me_fw_data_gpu_addr,
1774 (void **)&adev->gfx.me.me_fw_data_ptr);
1775 }
1776
1777 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1778 {
1779 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1780 &adev->gfx.rlc.rlc_autoload_gpu_addr,
1781 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1782 }
1783
1784 static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
1785 {
1786 int i;
1787 struct amdgpu_device *adev = ip_block->adev;
1788
1789 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1790 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1791 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1792 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1793
1794 amdgpu_gfx_mqd_sw_fini(adev, 0);
1795
1796 if (!adev->enable_mes_kiq) {
1797 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
1798 amdgpu_gfx_kiq_fini(adev, 0);
1799 }
1800
1801 amdgpu_gfx_cleaner_shader_sw_fini(adev);
1802
1803 gfx_v11_0_pfp_fini(adev);
1804 gfx_v11_0_me_fini(adev);
1805 gfx_v11_0_rlc_fini(adev);
1806 gfx_v11_0_mec_fini(adev);
1807
1808 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1809 gfx_v11_0_rlc_autoload_buffer_fini(adev);
1810
1811 gfx_v11_0_free_microcode(adev);
1812
1813 amdgpu_gfx_sysfs_fini(adev);
1814
1815 kfree(adev->gfx.ip_dump_core);
1816 kfree(adev->gfx.ip_dump_compute_queues);
1817 kfree(adev->gfx.ip_dump_gfx_queues);
1818
1819 return 0;
1820 }
1821
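/*
 * Route subsequent register accesses through GRBM_GFX_INDEX; a value of
 * 0xffffffff for se_num/sh_num/instance selects broadcast writes to all
 * units instead of a single index.
 */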
1822 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1823 u32 sh_num, u32 instance, int xcc_id)
1824 {
1825 u32 data;
1826
1827 if (instance == 0xffffffff)
1828 data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1829 INSTANCE_BROADCAST_WRITES, 1);
1830 else
1831 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1832 instance);
1833
1834 if (se_num == 0xffffffff)
1835 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1836 1);
1837 else
1838 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1839
1840 if (sh_num == 0xffffffff)
1841 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1842 1);
1843 else
1844 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1845
1846 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1847 }
1848
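/*
 * Active shader arrays are the full SA bitmask minus the units fused off
 * in hardware (CC_*) or disabled by the user (GC_USER_*); the RB helper
 * below applies the same pattern to render backends.
 */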
1849 static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
1850 {
1851 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
1852
1853 gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
1854 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
1855 CC_GC_SA_UNIT_DISABLE,
1856 SA_DISABLE);
1857 gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
1858 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
1859 GC_USER_SA_UNIT_DISABLE,
1860 SA_DISABLE);
1861 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
1862 adev->gfx.config.max_shader_engines);
1863
1864 return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
1865 }
1866
1867 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1868 {
1869 u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
1870 u32 rb_mask;
1871
1872 gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1873 gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
1874 CC_RB_BACKEND_DISABLE,
1875 BACKEND_DISABLE);
1876 gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1877 gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
1878 GC_USER_RB_BACKEND_DISABLE,
1879 BACKEND_DISABLE);
1880 rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
1881 adev->gfx.config.max_shader_engines);
1882
1883 return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
1884 }
1885
1886 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
1887 {
1888 u32 rb_bitmap_width_per_sa;
1889 u32 max_sa;
1890 u32 active_sa_bitmap;
1891 u32 global_active_rb_bitmap;
1892 u32 active_rb_bitmap = 0;
1893 u32 i;
1894
1895 /* query sa bitmap from SA_UNIT_DISABLE registers */
1896 active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
1897 /* query rb bitmap from RB_BACKEND_DISABLE registers */
1898 global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
1899
1900 /* generate active rb bitmap according to active sa bitmap */
1901 max_sa = adev->gfx.config.max_shader_engines *
1902 adev->gfx.config.max_sh_per_se;
1903 rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
1904 adev->gfx.config.max_sh_per_se;
1905 for (i = 0; i < max_sa; i++) {
1906 if (active_sa_bitmap & (1 << i))
1907 active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
1908 }
1909
1910 active_rb_bitmap &= global_active_rb_bitmap;
1911 adev->gfx.config.backend_enable_mask = active_rb_bitmap;
1912 adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
1913 }
1914
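/*
 * SH_MEM_BASES packs the private (scratch) aperture base into its low
 * 16 bits and the shared (LDS) base into the high 16 bits; each field
 * holds bits [63:48] of the aperture address (see the >> 48 shifts in
 * constants_init below).
 */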
1915 #define DEFAULT_SH_MEM_BASES (0x6000)
1916 #define LDS_APP_BASE 0x1
1917 #define SCRATCH_APP_BASE 0x2
1918
1919 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
1920 {
1921 int i;
1922 uint32_t sh_mem_bases;
1923 uint32_t data;
1924
1925 /*
1926 * Configure apertures:
1927 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1928 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1929 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1930 */
1931 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1932 SCRATCH_APP_BASE;
1933
1934 mutex_lock(&adev->srbm_mutex);
1935 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1936 soc21_grbm_select(adev, 0, 0, 0, i);
1937 /* CP and shaders */
1938 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1939 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
1940
1941 /* Enable trap for each kfd vmid. */
1942 data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
1943 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1944 WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
1945 }
1946 soc21_grbm_select(adev, 0, 0, 0, 0);
1947 mutex_unlock(&adev->srbm_mutex);
1948
1949 /*
1950 * Initialize all compute VMIDs to have no GDS, GWS, or OA
1951 * access. These should be enabled by FW for target VMIDs.
1952 */
1953 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1954 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
1955 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
1956 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
1957 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
1958 }
1959 }
1960
1961 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
1962 {
1963 int vmid;
1964
1965 /*
1966 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1967 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1968 * the driver can enable them for graphics. VMID0 should maintain
1969 * access so that HWS firmware can save/restore entries.
1970 */
1971 for (vmid = 1; vmid < 16; vmid++) {
1972 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
1973 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
1974 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
1975 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
1976 }
1977 }
1978
1979 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
1980 {
1981 /* TODO: harvest feature to be added later. */
1982 }
1983
1984 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
1985 {
1986 /* TCCs are global (not instanced). */
1987 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
1988 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
1989
1990 adev->gfx.config.tcc_disabled_mask =
1991 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1992 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1993 }
1994
1995 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
1996 {
1997 u32 tmp;
1998 int i;
1999
2000 if (!amdgpu_sriov_vf(adev))
2001 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2002
2003 gfx_v11_0_setup_rb(adev);
2004 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
2005 gfx_v11_0_get_tcc_info(adev);
2006 adev->gfx.config.pa_sc_tile_steering_override = 0;
2007
2008 /* Set whether texture coordinate truncation is conformant. */
2009 tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
2010 adev->gfx.config.ta_cntl2_truncate_coord_mode =
2011 REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
2012
2013 /* XXX SH_MEM regs */
2014 /* where to put LDS, scratch, GPUVM in FSA64 space */
2015 mutex_lock(&adev->srbm_mutex);
2016 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
2017 soc21_grbm_select(adev, 0, 0, 0, i);
2018 /* CP and shaders */
2019 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
2020 if (i != 0) {
2021 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2022 (adev->gmc.private_aperture_start >> 48));
2023 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2024 (adev->gmc.shared_aperture_start >> 48));
2025 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
2026 }
2027 }
2028 soc21_grbm_select(adev, 0, 0, 0, 0);
2029
2030 mutex_unlock(&adev->srbm_mutex);
2031
2032 gfx_v11_0_init_compute_vmid(adev);
2033 gfx_v11_0_init_gds_vmid(adev);
2034 }
2035
2036 static u32 gfx_v11_0_get_cpg_int_cntl(struct amdgpu_device *adev,
2037 int me, int pipe)
2038 {
2039 if (me != 0)
2040 return 0;
2041
2042 switch (pipe) {
2043 case 0:
2044 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
2045 case 1:
2046 return SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
2047 default:
2048 return 0;
2049 }
2050 }
2051
2052 static u32 gfx_v11_0_get_cpc_int_cntl(struct amdgpu_device *adev,
2053 int me, int pipe)
2054 {
2055 /*
2056 * amdgpu controls only the first MEC. That's why this function only
2057 * handles the setting of interrupts for this specific MEC. All other
2058 * pipes' interrupts are set by amdkfd.
2059 */
2060 if (me != 1)
2061 return 0;
2062
2063 switch (pipe) {
2064 case 0:
2065 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
2066 case 1:
2067 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
2068 case 2:
2069 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
2070 case 3:
2071 return SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
2072 default:
2073 return 0;
2074 }
2075 }
2076
2077 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2078 bool enable)
2079 {
2080 u32 tmp, cp_int_cntl_reg;
2081 int i, j;
2082
2083 if (amdgpu_sriov_vf(adev))
2084 return;
2085
2086 for (i = 0; i < adev->gfx.me.num_me; i++) {
2087 for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
2088 cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);
2089
2090 if (cp_int_cntl_reg) {
2091 tmp = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
2092 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
2093 enable ? 1 : 0);
2094 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
2095 enable ? 1 : 0);
2096 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
2097 enable ? 1 : 0);
2098 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
2099 enable ? 1 : 0);
2100 WREG32_SOC15_IP(GC, cp_int_cntl_reg, tmp);
2101 }
2102 }
2103 }
2104 }
2105
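/*
 * Point the RLC at the clear-state indirect buffer (CSIB): the buffer is
 * filled by get_csb_buffer() and its GPU address and length are
 * programmed so the RLC can initialize context state from it.
 */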
2106 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
2107 {
2108 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2109
2110 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
2111 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2112 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
2113 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2114 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
2115
2116 return 0;
2117 }
2118
2119 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
2120 {
2121 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
2122
2123 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2124 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
2125 }
2126
2127 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
2128 {
2129 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2130 udelay(50);
2131 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2132 udelay(50);
2133 }
2134
2135 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
2136 bool enable)
2137 {
2138 uint32_t rlc_pg_cntl;
2139
2140 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
2141
2142 if (!enable) {
2143 /* RLC_PG_CNTL[23] = 0 (default)
2144 * RLC will wait for handshake acks with SMU
2145 * GFXOFF will be enabled
2146 * RLC_PG_CNTL[23] = 1
2147 * RLC will not issue any message to SMU
2148 * hence no handshake between SMU & RLC
2149 * GFXOFF will be disabled
2150 */
2151 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2152 } else
2153 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2154 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
2155 }
2156
2157 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
2158 {
2159 /* TODO: enable the RLC & SMU handshake once the SMU
2160 * and GFXOFF features work as expected */
2161 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
2162 gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
2163
2164 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2165 udelay(50);
2166 }
2167
2168 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
2169 {
2170 uint32_t tmp;
2171
2172 /* enable Save Restore Machine */
2173 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
2174 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2175 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
2176 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
2177 }
2178
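/*
 * Legacy direct load of the RLC-G ucode: set the start offset in
 * RLC_GPM_UCODE_ADDR, stream the image dword-by-dword through
 * RLC_GPM_UCODE_DATA, and finish by writing the firmware version into
 * the address register as the end-of-load marker.
 */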
2179 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
2180 {
2181 const struct rlc_firmware_header_v2_0 *hdr;
2182 const __le32 *fw_data;
2183 unsigned i, fw_size;
2184
2185 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2186 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2187 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2188 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2189
2190 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
2191 RLCG_UCODE_LOADING_START_ADDRESS);
2192
2193 for (i = 0; i < fw_size; i++)
2194 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
2195 le32_to_cpup(fw_data++));
2196
2197 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2198 }
2199
2200 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2201 {
2202 const struct rlc_firmware_header_v2_2 *hdr;
2203 const __le32 *fw_data;
2204 unsigned i, fw_size;
2205 u32 tmp;
2206
2207 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2208
2209 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2210 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2211 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2212
2213 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2214
2215 for (i = 0; i < fw_size; i++) {
2216 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2217 msleep(1);
2218 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2219 le32_to_cpup(fw_data++));
2220 }
2221
2222 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2223
2224 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2225 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2226 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2227
2228 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2229 for (i = 0; i < fw_size; i++) {
2230 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2231 msleep(1);
2232 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2233 le32_to_cpup(fw_data++));
2234 }
2235
2236 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2237
2238 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2239 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2240 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2241 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2242 }
2243
2244 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
2245 {
2246 const struct rlc_firmware_header_v2_3 *hdr;
2247 const __le32 *fw_data;
2248 unsigned i, fw_size;
2249 u32 tmp;
2250
2251 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
2252
2253 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2254 le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
2255 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
2256
2257 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
2258
2259 for (i = 0; i < fw_size; i++) {
2260 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2261 msleep(1);
2262 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
2263 le32_to_cpup(fw_data++));
2264 }
2265
2266 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
2267
2268 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
2269 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
2270 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
2271
2272 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2273 le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
2274 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
2275
2276 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
2277
2278 for (i = 0; i < fw_size; i++) {
2279 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2280 msleep(1);
2281 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
2282 le32_to_cpup(fw_data++));
2283 }
2284
2285 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
2286
2287 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
2288 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
2289 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
2290 }
2291
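/*
 * Header version decides the payload: v2.0 always carries the RLC-G
 * image, v2.2 adds the LX6 IRAM/DRAM images, and v2.3 adds the RLCP and
 * RLCV images handled by the loaders above.
 */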
2292 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
2293 {
2294 const struct rlc_firmware_header_v2_0 *hdr;
2295 uint16_t version_major;
2296 uint16_t version_minor;
2297
2298 if (!adev->gfx.rlc_fw)
2299 return -EINVAL;
2300
2301 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2302 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2303
2304 version_major = le16_to_cpu(hdr->header.header_version_major);
2305 version_minor = le16_to_cpu(hdr->header.header_version_minor);
2306
2307 if (version_major == 2) {
2308 gfx_v11_0_load_rlcg_microcode(adev);
2309 if (amdgpu_dpm == 1) {
2310 if (version_minor >= 2)
2311 gfx_v11_0_load_rlc_iram_dram_microcode(adev);
2312 if (version_minor == 3)
2313 gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2314 }
2315
2316 return 0;
2317 }
2318
2319 return -EINVAL;
2320 }
2321
2322 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2323 {
2324 int r;
2325
2326 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2327 gfx_v11_0_init_csb(adev);
2328
2329 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2330 gfx_v11_0_rlc_enable_srm(adev);
2331 } else {
2332 if (amdgpu_sriov_vf(adev)) {
2333 gfx_v11_0_init_csb(adev);
2334 return 0;
2335 }
2336
2337 adev->gfx.rlc.funcs->stop(adev);
2338
2339 /* disable CG */
2340 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2341
2342 /* disable PG */
2343 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2344
2345 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2346 /* legacy rlc firmware loading */
2347 r = gfx_v11_0_rlc_load_microcode(adev);
2348 if (r)
2349 return r;
2350 }
2351
2352 gfx_v11_0_init_csb(adev);
2353
2354 adev->gfx.rlc.funcs->start(adev);
2355 }
2356 return 0;
2357 }
2358
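/*
 * The three config_*_cache() helpers below share one flow: trigger an
 * L1 instruction cache invalidation, poll for completion, program the
 * base control (VMID/cache policy), then write the 4KB-aligned ucode
 * GPU address into the IC_BASE registers.
 */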
2359 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2360 {
2361 uint32_t usec_timeout = 50000; /* wait for 50ms */
2362 uint32_t tmp;
2363 int i;
2364
2365 /* Trigger an invalidation of the L1 instruction caches */
2366 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2367 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2368 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2369
2370 /* Wait for invalidation complete */
2371 for (i = 0; i < usec_timeout; i++) {
2372 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2373 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2374 INVALIDATE_CACHE_COMPLETE))
2375 break;
2376 udelay(1);
2377 }
2378
2379 if (i >= usec_timeout) {
2380 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2381 return -EINVAL;
2382 }
2383
2384 if (amdgpu_emu_mode == 1)
2385 adev->hdp.funcs->flush_hdp(adev, NULL);
2386
2387 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2388 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2389 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2390 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2391 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2392 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2393
2394 /* Program the ME ucode address into the instruction cache base registers */
2395 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2396 lower_32_bits(addr) & 0xFFFFF000);
2397 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2398 upper_32_bits(addr));
2399
2400 return 0;
2401 }
2402
2403 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2404 {
2405 uint32_t usec_timeout = 50000; /* wait for 50ms */
2406 uint32_t tmp;
2407 int i;
2408
2409 /* Trigger an invalidation of the L1 instruction caches */
2410 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2411 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2412 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2413
2414 /* Wait for invalidation complete */
2415 for (i = 0; i < usec_timeout; i++) {
2416 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2417 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2418 INVALIDATE_CACHE_COMPLETE))
2419 break;
2420 udelay(1);
2421 }
2422
2423 if (i >= usec_timeout) {
2424 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2425 return -EINVAL;
2426 }
2427
2428 if (amdgpu_emu_mode == 1)
2429 adev->hdp.funcs->flush_hdp(adev, NULL);
2430
2431 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2432 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2433 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2434 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2435 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2436 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2437
2438 /* Program the PFP ucode address into the instruction cache base registers */
2439 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2440 lower_32_bits(addr) & 0xFFFFF000);
2441 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2442 upper_32_bits(addr));
2443
2444 return 0;
2445 }
2446
2447 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2448 {
2449 uint32_t usec_timeout = 50000; /* wait for 50ms */
2450 uint32_t tmp;
2451 int i;
2452
2453 /* Trigger an invalidation of the L1 instruction caches */
2454 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2455 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2456
2457 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2458
2459 /* Wait for invalidation complete */
2460 for (i = 0; i < usec_timeout; i++) {
2461 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2462 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2463 INVALIDATE_CACHE_COMPLETE))
2464 break;
2465 udelay(1);
2466 }
2467
2468 if (i >= usec_timeout) {
2469 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2470 return -EINVAL;
2471 }
2472
2473 if (amdgpu_emu_mode == 1)
2474 adev->hdp.funcs->flush_hdp(adev, NULL);
2475
2476 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2477 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2478 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2479 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2480 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2481
2482 /* Program the MEC1 ucode address into the instruction cache base registers */
2483 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2484 lower_32_bits(addr) & 0xFFFFF000);
2485 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2486 upper_32_bits(addr));
2487
2488 return 0;
2489 }
2490
2491 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2492 {
2493 uint32_t usec_timeout = 50000; /* wait for 50ms */
2494 uint32_t tmp;
2495 unsigned i, pipe_id;
2496 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2497
2498 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2499 adev->gfx.pfp_fw->data;
2500
2501 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2502 lower_32_bits(addr));
2503 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2504 upper_32_bits(addr));
2505
2506 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2507 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2508 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2509 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2510 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2511
2512 /*
2513 * Programming any of the CP_PFP_IC_BASE registers
2514 * forces an invalidation of the PFP L1 I$. Wait for the
2515 * invalidation to complete.
2516 */
2517 for (i = 0; i < usec_timeout; i++) {
2518 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2519 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2520 INVALIDATE_CACHE_COMPLETE))
2521 break;
2522 udelay(1);
2523 }
2524
2525 if (i >= usec_timeout) {
2526 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2527 return -EINVAL;
2528 }
2529
2530 /* Prime the L1 instruction caches */
2531 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2532 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2533 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2534 /* Wait for the instruction cache to be primed */
2535 for (i = 0; i < usec_timeout; i++) {
2536 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2537 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2538 ICACHE_PRIMED))
2539 break;
2540 udelay(1);
2541 }
2542
2543 if (i >= usec_timeout) {
2544 dev_err(adev->dev, "failed to prime instruction cache\n");
2545 return -EINVAL;
2546 }
2547
2548 mutex_lock(&adev->srbm_mutex);
2549 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2550 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2551 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2552 (pfp_hdr->ucode_start_addr_hi << 30) |
2553 (pfp_hdr->ucode_start_addr_lo >> 2));
2554 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2555 pfp_hdr->ucode_start_addr_hi >> 2);
2556
2557 /*
2558 * Toggle the PFP pipe reset in CP_ME_CNTL so that the new
2559 * CP_PFP_PRGRM_CNTR_START value takes effect.
2560 */
2561 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2562 if (pipe_id == 0)
2563 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2564 PFP_PIPE0_RESET, 1);
2565 else
2566 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2567 PFP_PIPE1_RESET, 1);
2568 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2569
2570 /* Clear the PFP pipe reset bit. */
2571 if (pipe_id == 0)
2572 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2573 PFP_PIPE0_RESET, 0);
2574 else
2575 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2576 PFP_PIPE1_RESET, 0);
2577 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2578
2579 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2580 lower_32_bits(addr2));
2581 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2582 upper_32_bits(addr2));
2583 }
2584 soc21_grbm_select(adev, 0, 0, 0, 0);
2585 mutex_unlock(&adev->srbm_mutex);
2586
2587 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2588 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2589 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2590 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2591
2592 /* Invalidate the data caches */
2593 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2594 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2595 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2596
2597 for (i = 0; i < usec_timeout; i++) {
2598 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2599 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2600 INVALIDATE_DCACHE_COMPLETE))
2601 break;
2602 udelay(1);
2603 }
2604
2605 if (i >= usec_timeout) {
2606 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2607 return -EINVAL;
2608 }
2609
2610 return 0;
2611 }
2612
2613 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2614 {
2615 uint32_t usec_timeout = 50000; /* wait for 50ms */
2616 uint32_t tmp;
2617 unsigned i, pipe_id;
2618 const struct gfx_firmware_header_v2_0 *me_hdr;
2619
2620 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2621 adev->gfx.me_fw->data;
2622
2623 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2624 lower_32_bits(addr));
2625 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2626 upper_32_bits(addr));
2627
2628 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2629 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2630 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2631 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2632 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2633
2634 /*
2635 * Programming any of the CP_ME_IC_BASE registers
2636 * forces an invalidation of the ME L1 I$. Wait for the
2637 * invalidation to complete.
2638 */
2639 for (i = 0; i < usec_timeout; i++) {
2640 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2641 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2642 INVALIDATE_CACHE_COMPLETE))
2643 break;
2644 udelay(1);
2645 }
2646
2647 if (i >= usec_timeout) {
2648 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2649 return -EINVAL;
2650 }
2651
2652 /* Prime the instruction caches */
2653 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2654 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2655 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2656
2657 /* Wait for the instruction cache to be primed */
2658 for (i = 0; i < usec_timeout; i++) {
2659 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2660 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2661 ICACHE_PRIMED))
2662 break;
2663 udelay(1);
2664 }
2665
2666 if (i >= usec_timeout) {
2667 dev_err(adev->dev, "failed to prime instruction cache\n");
2668 return -EINVAL;
2669 }
2670
2671 mutex_lock(&adev->srbm_mutex);
2672 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2673 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2674 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2675 (me_hdr->ucode_start_addr_hi << 30) |
2676 (me_hdr->ucode_start_addr_lo >> 2));
2677 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2678 me_hdr->ucode_start_addr_hi >> 2);
2679
2680 /*
2681 * Toggle the ME pipe reset in CP_ME_CNTL so that the new
2682 * CP_ME_PRGRM_CNTR_START value takes effect.
2683 */
2684 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2685 if (pipe_id == 0)
2686 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2687 ME_PIPE0_RESET, 1);
2688 else
2689 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2690 ME_PIPE1_RESET, 1);
2691 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2692
2693 /* Clear the ME pipe reset bit. */
2694 if (pipe_id == 0)
2695 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2696 ME_PIPE0_RESET, 0);
2697 else
2698 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2699 ME_PIPE1_RESET, 0);
2700 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2701
2702 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2703 lower_32_bits(addr2));
2704 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2705 upper_32_bits(addr2));
2706 }
2707 soc21_grbm_select(adev, 0, 0, 0, 0);
2708 mutex_unlock(&adev->srbm_mutex);
2709
2710 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2711 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2712 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2713 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2714
2715 /* Invalidate the data caches */
2716 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2717 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2718 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2719
2720 for (i = 0; i < usec_timeout; i++) {
2721 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2722 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2723 INVALIDATE_DCACHE_COMPLETE))
2724 break;
2725 udelay(1);
2726 }
2727
2728 if (i >= usec_timeout) {
2729 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2730 return -EINVAL;
2731 }
2732
2733 return 0;
2734 }
2735
2736 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2737 {
2738 uint32_t usec_timeout = 50000; /* wait for 50ms */
2739 uint32_t tmp;
2740 unsigned i;
2741 const struct gfx_firmware_header_v2_0 *mec_hdr;
2742
2743 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2744 adev->gfx.mec_fw->data;
2745
2746 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2747 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2748 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2749 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2750 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2751
2752 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2753 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2754 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2755 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2756
2757 mutex_lock(&adev->srbm_mutex);
2758 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2759 soc21_grbm_select(adev, 1, i, 0, 0);
2760
2761 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2762 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2763 upper_32_bits(addr2));
2764
2765 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2766 mec_hdr->ucode_start_addr_lo >> 2 |
2767 mec_hdr->ucode_start_addr_hi << 30);
2768 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2769 mec_hdr->ucode_start_addr_hi >> 2);
2770
2771 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2772 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2773 upper_32_bits(addr));
2774 }
2775 mutex_unlock(&adev->srbm_mutex);
2776 soc21_grbm_select(adev, 0, 0, 0, 0);
2777
2778 /* Trigger an invalidation of the MEC data caches */
2779 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2780 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2781 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2782
2783 /* Wait for invalidation complete */
2784 for (i = 0; i < usec_timeout; i++) {
2785 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2786 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2787 INVALIDATE_DCACHE_COMPLETE))
2788 break;
2789 udelay(1);
2790 }
2791
2792 if (i >= usec_timeout) {
2793 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2794 return -EINVAL;
2795 }
2796
2797 /* Trigger an invalidation of the L1 instruction caches */
2798 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2799 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2800 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2801
2802 /* Wait for invalidation complete */
2803 for (i = 0; i < usec_timeout; i++) {
2804 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2805 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2806 INVALIDATE_CACHE_COMPLETE))
2807 break;
2808 udelay(1);
2809 }
2810
2811 if (i >= usec_timeout) {
2812 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2813 return -EINVAL;
2814 }
2815
2816 return 0;
2817 }
2818
2819 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2820 {
2821 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2822 const struct gfx_firmware_header_v2_0 *me_hdr;
2823 const struct gfx_firmware_header_v2_0 *mec_hdr;
2824 uint32_t pipe_id, tmp;
2825
2826 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2827 adev->gfx.mec_fw->data;
2828 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2829 adev->gfx.me_fw->data;
2830 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2831 adev->gfx.pfp_fw->data;
2832
2833 /* config pfp program start addr */
2834 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2835 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2836 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2837 (pfp_hdr->ucode_start_addr_hi << 30) |
2838 (pfp_hdr->ucode_start_addr_lo >> 2));
2839 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2840 pfp_hdr->ucode_start_addr_hi >> 2);
2841 }
2842 soc21_grbm_select(adev, 0, 0, 0, 0);
2843
2844 /* reset pfp pipe */
2845 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2846 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2847 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2848 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2849
2850 /* clear pfp pipe reset */
2851 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2852 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2853 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2854
2855 /* config me program start addr */
2856 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2857 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2858 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2859 (me_hdr->ucode_start_addr_hi << 30) |
2860 (me_hdr->ucode_start_addr_lo >> 2));
2861 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2862 me_hdr->ucode_start_addr_hi >> 2);
2863 }
2864 soc21_grbm_select(adev, 0, 0, 0, 0);
2865
2866 /* reset me pipe */
2867 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2868 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2869 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2870 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2871
2872 /* clear me pipe reset */
2873 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2874 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2875 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2876
2877 /* config mec program start addr */
2878 for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2879 soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2880 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2881 mec_hdr->ucode_start_addr_lo >> 2 |
2882 mec_hdr->ucode_start_addr_hi << 30);
2883 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2884 mec_hdr->ucode_start_addr_hi >> 2);
2885 }
2886 soc21_grbm_select(adev, 0, 0, 0, 0);
2887
2888 /* reset mec pipe */
2889 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2890 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2891 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2892 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2893 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2894 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2895
2896 /* clear mec pipe reset */
2897 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2898 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2899 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2900 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2901 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2902 }
2903
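/*
 * Poll until the CP is idle (CP_STAT == 0) and the RLC reports
 * BOOTLOAD_COMPLETE; GC 11.0.1/11.0.4 and 11.5.x expose the bootload
 * status at a relocated register offset.
 */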
2904 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2905 {
2906 uint32_t cp_status;
2907 uint32_t bootload_status;
2908 int i, r;
2909 uint64_t addr, addr2;
2910
2911 for (i = 0; i < adev->usec_timeout; i++) {
2912 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2913
2914 if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
2915 IP_VERSION(11, 0, 1) ||
2916 amdgpu_ip_version(adev, GC_HWIP, 0) ==
2917 IP_VERSION(11, 0, 4) ||
2918 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) ||
2919 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1) ||
2920 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 2))
2921 bootload_status = RREG32_SOC15(GC, 0,
2922 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
2923 else
2924 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2925
2926 if ((cp_status == 0) &&
2927 (REG_GET_FIELD(bootload_status,
2928 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2929 break;
2930 }
2931 udelay(1);
2932 }
2933
2934 if (i >= adev->usec_timeout) {
2935 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2936 return -ETIMEDOUT;
2937 }
2938
2939 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2940 if (adev->gfx.rs64_enable) {
2941 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2942 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2943 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2944 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2945 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2946 if (r)
2947 return r;
2948 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2949 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2950 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2951 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2952 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2953 if (r)
2954 return r;
2955 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2956 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2957 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2958 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2959 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2960 if (r)
2961 return r;
2962 } else {
2963 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2964 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2965 r = gfx_v11_0_config_me_cache(adev, addr);
2966 if (r)
2967 return r;
2968 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2969 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2970 r = gfx_v11_0_config_pfp_cache(adev, addr);
2971 if (r)
2972 return r;
2973 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2974 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2975 r = gfx_v11_0_config_mec_cache(adev, addr);
2976 if (r)
2977 return r;
2978 }
2979 }
2980
2981 return 0;
2982 }
2983
2984 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2985 {
2986 int i;
2987 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2988
2989 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2990 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2991 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2992
2993 for (i = 0; i < adev->usec_timeout; i++) {
2994 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2995 break;
2996 udelay(1);
2997 }
2998
2999 if (i >= adev->usec_timeout)
3000 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
3001
3002 return 0;
3003 }
3004
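/*
 * Legacy (v1 header) PFP load: copy the ucode into a GTT buffer that the
 * PFP instruction cache is pointed at, then write the jump table through
 * the CP_HYP_PFP_UCODE_ADDR/DATA register pair.
 */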
3005 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
3006 {
3007 int r;
3008 const struct gfx_firmware_header_v1_0 *pfp_hdr;
3009 const __le32 *fw_data;
3010 unsigned i, fw_size;
3011
3012 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3013 adev->gfx.pfp_fw->data;
3014
3015 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3016
3017 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3018 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3019 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
3020
3021 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
3022 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3023 &adev->gfx.pfp.pfp_fw_obj,
3024 &adev->gfx.pfp.pfp_fw_gpu_addr,
3025 (void **)&adev->gfx.pfp.pfp_fw_ptr);
3026 if (r) {
3027 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
3028 gfx_v11_0_pfp_fini(adev);
3029 return r;
3030 }
3031
3032 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
3033
3034 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3035 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3036
3037 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
3038
3039 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
3040
3041 for (i = 0; i < pfp_hdr->jt_size; i++)
3042 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
3043 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
3044
3045 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3046
3047 return 0;
3048 }
3049
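/*
 * RS64 PFP loading path.  v2 firmware images carry separate instruction
 * and data payloads, so each is copied into its own 64KB-aligned BO.
 * The sequence below programs the instruction-cache base, waits for the
 * implicit I$ invalidation that this triggers, primes the I$, sets the
 * per-pipe start PC (with a pipe reset pulse so it takes effect), and
 * finally invalidates the RS64 data cache.
 */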
3050 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
3051 {
3052 int r;
3053 const struct gfx_firmware_header_v2_0 *pfp_hdr;
3054 const __le32 *fw_ucode, *fw_data;
3055 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3056 uint32_t tmp;
3057 uint32_t usec_timeout = 50000; /* wait for 50ms */
3058
3059 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
3060 adev->gfx.pfp_fw->data;
3061
3062 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3063
3064 /* instruction */
3065 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
3066 le32_to_cpu(pfp_hdr->ucode_offset_bytes));
3067 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
3068 /* data */
3069 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
3070 le32_to_cpu(pfp_hdr->data_offset_bytes));
3071 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
3072
3073 /* 64kb align */
3074 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3075 64 * 1024,
3076 AMDGPU_GEM_DOMAIN_VRAM |
3077 AMDGPU_GEM_DOMAIN_GTT,
3078 &adev->gfx.pfp.pfp_fw_obj,
3079 &adev->gfx.pfp.pfp_fw_gpu_addr,
3080 (void **)&adev->gfx.pfp.pfp_fw_ptr);
3081 if (r) {
3082 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
3083 gfx_v11_0_pfp_fini(adev);
3084 return r;
3085 }
3086
3087 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3088 64 * 1024,
3089 AMDGPU_GEM_DOMAIN_VRAM |
3090 AMDGPU_GEM_DOMAIN_GTT,
3091 &adev->gfx.pfp.pfp_fw_data_obj,
3092 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
3093 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
3094 if (r) {
3095 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
3096 gfx_v11_0_pfp_fini(adev);
3097 return r;
3098 }
3099
3100 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
3101 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
3102
3103 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
3104 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
3105 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
3106 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
3107
3108 if (amdgpu_emu_mode == 1)
3109 adev->hdp.funcs->flush_hdp(adev, NULL);
3110
3111 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
3112 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3113 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
3114 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
3115
3116 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
3117 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
3118 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
3119 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
3120 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
3121
3122 /*
3123 * Programming any of the CP_PFP_IC_BASE registers
3124 	 * forces an invalidation of the PFP L1 I$. Wait for the
3125 	 * invalidation to complete.
3126 */
3127 for (i = 0; i < usec_timeout; i++) {
3128 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3129 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3130 INVALIDATE_CACHE_COMPLETE))
3131 break;
3132 udelay(1);
3133 }
3134
3135 if (i >= usec_timeout) {
3136 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3137 return -EINVAL;
3138 }
3139
3140 /* Prime the L1 instruction caches */
3141 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3142 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
3143 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
3144 	/* Wait for the cache to be primed */
3145 for (i = 0; i < usec_timeout; i++) {
3146 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
3147 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
3148 ICACHE_PRIMED))
3149 break;
3150 udelay(1);
3151 }
3152
3153 if (i >= usec_timeout) {
3154 dev_err(adev->dev, "failed to prime instruction cache\n");
3155 return -EINVAL;
3156 }
3157
3158 mutex_lock(&adev->srbm_mutex);
3159 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3160 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3161 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
3162 (pfp_hdr->ucode_start_addr_hi << 30) |
3163 			(pfp_hdr->ucode_start_addr_lo >> 2));
3164 		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
3165 			pfp_hdr->ucode_start_addr_hi >> 2);
3166
3167 /*
3168 		 * Program CP_ME_CNTL to reset the given pipe so that
3169 		 * CP_PFP_PRGRM_CNTR_START takes effect.
3170 */
3171 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3172 if (pipe_id == 0)
3173 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3174 PFP_PIPE0_RESET, 1);
3175 else
3176 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3177 PFP_PIPE1_RESET, 1);
3178 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3179
3180 		/* Clear the pfp pipe reset bit. */
3181 if (pipe_id == 0)
3182 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3183 PFP_PIPE0_RESET, 0);
3184 else
3185 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3186 PFP_PIPE1_RESET, 0);
3187 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3188
3189 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
3190 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3191 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
3192 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3193 }
3194 soc21_grbm_select(adev, 0, 0, 0, 0);
3195 mutex_unlock(&adev->srbm_mutex);
3196
3197 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3198 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3199 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3200 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3201
3202 /* Invalidate the data caches */
3203 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3204 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3205 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3206
3207 for (i = 0; i < usec_timeout; i++) {
3208 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3209 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3210 INVALIDATE_DCACHE_COMPLETE))
3211 break;
3212 udelay(1);
3213 }
3214
3215 if (i >= usec_timeout) {
3216 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3217 return -EINVAL;
3218 }
3219
3220 return 0;
3221 }
3222
3223 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
3224 {
3225 int r;
3226 const struct gfx_firmware_header_v1_0 *me_hdr;
3227 const __le32 *fw_data;
3228 unsigned i, fw_size;
3229
3230 me_hdr = (const struct gfx_firmware_header_v1_0 *)
3231 adev->gfx.me_fw->data;
3232
3233 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3234
3235 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3236 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3237 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
3238
3239 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
3240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3241 &adev->gfx.me.me_fw_obj,
3242 &adev->gfx.me.me_fw_gpu_addr,
3243 (void **)&adev->gfx.me.me_fw_ptr);
3244 if (r) {
3245 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
3246 gfx_v11_0_me_fini(adev);
3247 return r;
3248 }
3249
3250 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
3251
3252 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3253 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3254
3255 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
3256
3257 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
3258
3259 for (i = 0; i < me_hdr->jt_size; i++)
3260 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
3261 le32_to_cpup(fw_data + me_hdr->jt_offset + i));
3262
3263 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
3264
3265 return 0;
3266 }
3267
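/*
 * RS64 ME loading path; mirrors the PFP sequence above but uses the
 * CP_ME_IC_* / ME pipe-reset registers and data-cache base slot 1
 * (CP_GFX_RS64_DC_BASE1) instead of slot 0.
 */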
3268 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
3269 {
3270 int r;
3271 const struct gfx_firmware_header_v2_0 *me_hdr;
3272 const __le32 *fw_ucode, *fw_data;
3273 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3274 uint32_t tmp;
3275 uint32_t usec_timeout = 50000; /* wait for 50ms */
3276
3277 me_hdr = (const struct gfx_firmware_header_v2_0 *)
3278 adev->gfx.me_fw->data;
3279
3280 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3281
3282 /* instruction */
3283 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
3284 le32_to_cpu(me_hdr->ucode_offset_bytes));
3285 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
3286 /* data */
3287 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3288 le32_to_cpu(me_hdr->data_offset_bytes));
3289 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
3290
3291 	/* 64kb align */
3292 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3293 64 * 1024,
3294 AMDGPU_GEM_DOMAIN_VRAM |
3295 AMDGPU_GEM_DOMAIN_GTT,
3296 &adev->gfx.me.me_fw_obj,
3297 &adev->gfx.me.me_fw_gpu_addr,
3298 (void **)&adev->gfx.me.me_fw_ptr);
3299 if (r) {
3300 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
3301 gfx_v11_0_me_fini(adev);
3302 return r;
3303 }
3304
3305 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3306 64 * 1024,
3307 AMDGPU_GEM_DOMAIN_VRAM |
3308 AMDGPU_GEM_DOMAIN_GTT,
3309 &adev->gfx.me.me_fw_data_obj,
3310 &adev->gfx.me.me_fw_data_gpu_addr,
3311 (void **)&adev->gfx.me.me_fw_data_ptr);
3312 if (r) {
3313 dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
3314 		gfx_v11_0_me_fini(adev);
3315 return r;
3316 }
3317
3318 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
3319 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
3320
3321 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3322 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3323 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3324 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3325
3326 if (amdgpu_emu_mode == 1)
3327 adev->hdp.funcs->flush_hdp(adev, NULL);
3328
3329 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3330 lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3331 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3332 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3333
3334 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3335 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3336 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3337 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3338 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3339
3340 /*
3341 * Programming any of the CP_ME_IC_BASE registers
3342 	 * forces an invalidation of the ME L1 I$. Wait for the
3343 	 * invalidation to complete.
3344 */
3345 for (i = 0; i < usec_timeout; i++) {
3346 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3347 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3348 INVALIDATE_CACHE_COMPLETE))
3349 break;
3350 udelay(1);
3351 }
3352
3353 if (i >= usec_timeout) {
3354 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3355 return -EINVAL;
3356 }
3357
3358 /* Prime the instruction caches */
3359 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3360 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3361 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3362
3363 	/* Wait for the instruction cache to be primed */
3364 for (i = 0; i < usec_timeout; i++) {
3365 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3366 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3367 ICACHE_PRIMED))
3368 break;
3369 udelay(1);
3370 }
3371
3372 if (i >= usec_timeout) {
3373 dev_err(adev->dev, "failed to prime instruction cache\n");
3374 return -EINVAL;
3375 }
3376
3377 mutex_lock(&adev->srbm_mutex);
3378 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3379 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3380 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3381 (me_hdr->ucode_start_addr_hi << 30) |
3382 			(me_hdr->ucode_start_addr_lo >> 2));
3383 		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3384 			me_hdr->ucode_start_addr_hi >> 2);
3385
3386 /*
3387 		 * Program CP_ME_CNTL to reset the given pipe so that
3388 		 * CP_ME_PRGRM_CNTR_START takes effect.
3389 */
3390 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3391 if (pipe_id == 0)
3392 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3393 ME_PIPE0_RESET, 1);
3394 else
3395 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3396 ME_PIPE1_RESET, 1);
3397 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3398
3399 		/* Clear the me pipe reset bit. */
3400 if (pipe_id == 0)
3401 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3402 ME_PIPE0_RESET, 0);
3403 else
3404 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3405 ME_PIPE1_RESET, 0);
3406 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3407
3408 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3409 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3410 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3411 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3412 }
3413 soc21_grbm_select(adev, 0, 0, 0, 0);
3414 mutex_unlock(&adev->srbm_mutex);
3415
3416 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3417 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3418 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3419 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3420
3421 /* Invalidate the data caches */
3422 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3423 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3424 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3425
3426 for (i = 0; i < usec_timeout; i++) {
3427 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3428 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3429 INVALIDATE_DCACHE_COMPLETE))
3430 break;
3431 udelay(1);
3432 }
3433
3434 if (i >= usec_timeout) {
3435 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3436 return -EINVAL;
3437 }
3438
3439 return 0;
3440 }
3441
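/*
 * Front-door (legacy) CP gfx microcode load: halt the CP, then pick the
 * RS64 or the pre-RS64 loader for PFP and ME depending on
 * adev->gfx.rs64_enable.
 */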
3442 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3443 {
3444 int r;
3445
3446 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3447 return -EINVAL;
3448
3449 gfx_v11_0_cp_gfx_enable(adev, false);
3450
3451 if (adev->gfx.rs64_enable)
3452 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3453 else
3454 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3455 if (r) {
3456 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3457 return r;
3458 }
3459
3460 if (adev->gfx.rs64_enable)
3461 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3462 else
3463 r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3464 if (r) {
3465 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3466 return r;
3467 }
3468
3469 return 0;
3470 }
3471
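/*
 * Bring up the gfx CP: program the context limits, then submit the
 * clear-state contents (the SECT_CONTEXT registers from gfx11_cs_data
 * plus PA_SC_TILE_STEERING_OVERRIDE) through ring 0 as a
 * PREAMBLE/CLEAR_STATE sequence.  If a second gfx ring exists, a bare
 * CLEAR_STATE is submitted there to copy state 0 into the next state.
 */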
3472 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3473 {
3474 struct amdgpu_ring *ring;
3475 const struct cs_section_def *sect = NULL;
3476 const struct cs_extent_def *ext = NULL;
3477 int r, i;
3478 int ctx_reg_offset;
3479
3480 /* init the CP */
3481 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3482 adev->gfx.config.max_hw_contexts - 1);
3483 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3484
3485 if (!amdgpu_async_gfx_ring)
3486 gfx_v11_0_cp_gfx_enable(adev, true);
3487
3488 ring = &adev->gfx.gfx_ring[0];
3489 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3490 if (r) {
3491 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3492 return r;
3493 }
3494
3495 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3496 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3497
3498 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3499 amdgpu_ring_write(ring, 0x80000000);
3500 amdgpu_ring_write(ring, 0x80000000);
3501
3502 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3503 for (ext = sect->section; ext->extent != NULL; ++ext) {
3504 if (sect->id == SECT_CONTEXT) {
3505 amdgpu_ring_write(ring,
3506 PACKET3(PACKET3_SET_CONTEXT_REG,
3507 ext->reg_count));
3508 amdgpu_ring_write(ring, ext->reg_index -
3509 PACKET3_SET_CONTEXT_REG_START);
3510 for (i = 0; i < ext->reg_count; i++)
3511 amdgpu_ring_write(ring, ext->extent[i]);
3512 }
3513 }
3514 }
3515
3516 ctx_reg_offset =
3517 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3518 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3519 amdgpu_ring_write(ring, ctx_reg_offset);
3520 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3521
3522 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3523 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3524
3525 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3526 amdgpu_ring_write(ring, 0);
3527
3528 amdgpu_ring_commit(ring);
3529
3530 /* submit cs packet to copy state 0 to next available state */
3531 if (adev->gfx.num_gfx_rings > 1) {
3532 /* maximum supported gfx ring is 2 */
3533 ring = &adev->gfx.gfx_ring[1];
3534 r = amdgpu_ring_alloc(ring, 2);
3535 if (r) {
3536 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3537 return r;
3538 }
3539
3540 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3541 amdgpu_ring_write(ring, 0);
3542
3543 amdgpu_ring_commit(ring);
3544 }
3545 return 0;
3546 }
3547
3548 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3549 CP_PIPE_ID pipe)
3550 {
3551 u32 tmp;
3552
3553 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3554 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3555
3556 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3557 }
3558
3559 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3560 struct amdgpu_ring *ring)
3561 {
3562 u32 tmp;
3563
3564 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3565 if (ring->use_doorbell) {
3566 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3567 DOORBELL_OFFSET, ring->doorbell_index);
3568 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3569 DOORBELL_EN, 1);
3570 } else {
3571 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3572 DOORBELL_EN, 0);
3573 }
3574 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3575
3576 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3577 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3578 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3579
3580 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3581 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3582 }
3583
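/*
 * (Re)program the gfx ring buffers.  For each hardware pipe this sets,
 * under srbm_mutex: ring size and block size (CP_RBn_CNTL), the write
 * pointer, the rptr/wptr write-back addresses, the ring base and the
 * doorbell, before kicking off gfx_v11_0_cp_gfx_start().
 */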
3584 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3585 {
3586 struct amdgpu_ring *ring;
3587 u32 tmp;
3588 u32 rb_bufsz;
3589 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3590
3591 /* Set the write pointer delay */
3592 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3593
3594 /* set the RB to use vmid 0 */
3595 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3596
3597 /* Init gfx ring 0 for pipe 0 */
3598 mutex_lock(&adev->srbm_mutex);
3599 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3600
3601 /* Set ring buffer size */
3602 ring = &adev->gfx.gfx_ring[0];
3603 rb_bufsz = order_base_2(ring->ring_size / 8);
3604 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3605 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3606 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3607
3608 /* Initialize the ring buffer's write pointers */
3609 ring->wptr = 0;
3610 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3611 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3612
3613 /* set the wb address whether it's enabled or not */
3614 rptr_addr = ring->rptr_gpu_addr;
3615 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3616 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3617 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3618
3619 wptr_gpu_addr = ring->wptr_gpu_addr;
3620 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3621 lower_32_bits(wptr_gpu_addr));
3622 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3623 upper_32_bits(wptr_gpu_addr));
3624
3625 mdelay(1);
3626 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3627
3628 rb_addr = ring->gpu_addr >> 8;
3629 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3630 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3631
3632 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3633
3634 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3635 mutex_unlock(&adev->srbm_mutex);
3636
3637 /* Init gfx ring 1 for pipe 1 */
3638 if (adev->gfx.num_gfx_rings > 1) {
3639 mutex_lock(&adev->srbm_mutex);
3640 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3641 /* maximum supported gfx ring is 2 */
3642 ring = &adev->gfx.gfx_ring[1];
3643 rb_bufsz = order_base_2(ring->ring_size / 8);
3644 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3645 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3646 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3647 /* Initialize the ring buffer's write pointers */
3648 ring->wptr = 0;
3649 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3650 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3651 /* Set the wb address whether it's enabled or not */
3652 rptr_addr = ring->rptr_gpu_addr;
3653 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3654 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3655 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3656 wptr_gpu_addr = ring->wptr_gpu_addr;
3657 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3658 lower_32_bits(wptr_gpu_addr));
3659 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3660 upper_32_bits(wptr_gpu_addr));
3661
3662 mdelay(1);
3663 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3664
3665 rb_addr = ring->gpu_addr >> 8;
3666 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3667 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3668 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3669
3670 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3671 mutex_unlock(&adev->srbm_mutex);
3672 }
3673 /* Switch to pipe 0 */
3674 mutex_lock(&adev->srbm_mutex);
3675 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3676 mutex_unlock(&adev->srbm_mutex);
3677
3678 /* start the ring */
3679 gfx_v11_0_cp_gfx_start(adev);
3680
3681 return 0;
3682 }
3683
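/*
 * Enable or halt the compute micro engine(s).  The RS64 path drives the
 * per-pipe reset/active bits in CP_MEC_RS64_CNTL; the legacy path only
 * toggles the ME1/ME2 halt bits in CP_MEC_CNTL (ME2 is not un-halted
 * when the MES KIQ is in use).
 */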
3684 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3685 {
3686 u32 data;
3687
3688 if (adev->gfx.rs64_enable) {
3689 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3690 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3691 enable ? 0 : 1);
3692 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3693 enable ? 0 : 1);
3694 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3695 enable ? 0 : 1);
3696 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3697 enable ? 0 : 1);
3698 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3699 enable ? 0 : 1);
3700 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3701 enable ? 1 : 0);
3702 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3703 enable ? 1 : 0);
3704 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3705 enable ? 1 : 0);
3706 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3707 enable ? 1 : 0);
3708 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3709 enable ? 0 : 1);
3710 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3711 } else {
3712 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3713
3714 if (enable) {
3715 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3716 if (!adev->enable_mes_kiq)
3717 data = REG_SET_FIELD(data, CP_MEC_CNTL,
3718 MEC_ME2_HALT, 0);
3719 } else {
3720 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3721 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3722 }
3723 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3724 }
3725
3726 udelay(50);
3727 }
3728
3729 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3730 {
3731 const struct gfx_firmware_header_v1_0 *mec_hdr;
3732 const __le32 *fw_data;
3733 unsigned i, fw_size;
3734 u32 *fw = NULL;
3735 int r;
3736
3737 if (!adev->gfx.mec_fw)
3738 return -EINVAL;
3739
3740 gfx_v11_0_cp_compute_enable(adev, false);
3741
3742 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3743 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3744
3745 fw_data = (const __le32 *)
3746 (adev->gfx.mec_fw->data +
3747 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3748 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3749
3750 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3751 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3752 &adev->gfx.mec.mec_fw_obj,
3753 &adev->gfx.mec.mec_fw_gpu_addr,
3754 (void **)&fw);
3755 if (r) {
3756 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3757 gfx_v11_0_mec_fini(adev);
3758 return r;
3759 }
3760
3761 memcpy(fw, fw_data, fw_size);
3762
3763 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3764 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3765
3766 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3767
3768 /* MEC1 */
3769 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3770
3771 for (i = 0; i < mec_hdr->jt_size; i++)
3772 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3773 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3774
3775 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3776
3777 return 0;
3778 }
3779
3780 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3781 {
3782 const struct gfx_firmware_header_v2_0 *mec_hdr;
3783 const __le32 *fw_ucode, *fw_data;
3784 u32 tmp, fw_ucode_size, fw_data_size;
3785 u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3786 u32 *fw_ucode_ptr, *fw_data_ptr;
3787 int r;
3788
3789 if (!adev->gfx.mec_fw)
3790 return -EINVAL;
3791
3792 gfx_v11_0_cp_compute_enable(adev, false);
3793
3794 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3795 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3796
3797 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3798 le32_to_cpu(mec_hdr->ucode_offset_bytes));
3799 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3800
3801 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3802 le32_to_cpu(mec_hdr->data_offset_bytes));
3803 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3804
3805 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3806 64 * 1024,
3807 AMDGPU_GEM_DOMAIN_VRAM |
3808 AMDGPU_GEM_DOMAIN_GTT,
3809 &adev->gfx.mec.mec_fw_obj,
3810 &adev->gfx.mec.mec_fw_gpu_addr,
3811 (void **)&fw_ucode_ptr);
3812 if (r) {
3813 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3814 gfx_v11_0_mec_fini(adev);
3815 return r;
3816 }
3817
3818 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3819 64 * 1024,
3820 AMDGPU_GEM_DOMAIN_VRAM |
3821 AMDGPU_GEM_DOMAIN_GTT,
3822 &adev->gfx.mec.mec_fw_data_obj,
3823 &adev->gfx.mec.mec_fw_data_gpu_addr,
3824 (void **)&fw_data_ptr);
3825 if (r) {
3826 		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
3827 gfx_v11_0_mec_fini(adev);
3828 return r;
3829 }
3830
3831 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3832 memcpy(fw_data_ptr, fw_data, fw_data_size);
3833
3834 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3835 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3836 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3837 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3838
3839 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3840 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3841 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3842 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3843 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3844
3845 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3846 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3847 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3848 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3849
3850 mutex_lock(&adev->srbm_mutex);
3851 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3852 soc21_grbm_select(adev, 1, i, 0, 0);
3853
3854 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3855 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3856 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3857
3858 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3859 mec_hdr->ucode_start_addr_lo >> 2 |
3860 mec_hdr->ucode_start_addr_hi << 30);
3861 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3862 mec_hdr->ucode_start_addr_hi >> 2);
3863
3864 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3865 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3866 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3867 }
3868 mutex_unlock(&adev->srbm_mutex);
3869 soc21_grbm_select(adev, 0, 0, 0, 0);
3870
3871 	/* Trigger an invalidation of the MEC data caches */
3872 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3873 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3874 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3875
3876 /* Wait for invalidation complete */
3877 for (i = 0; i < usec_timeout; i++) {
3878 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3879 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3880 INVALIDATE_DCACHE_COMPLETE))
3881 break;
3882 udelay(1);
3883 }
3884
3885 if (i >= usec_timeout) {
3886 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3887 return -EINVAL;
3888 }
3889
3890 /* Trigger an invalidation of the L1 instruction caches */
3891 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3892 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3893 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3894
3895 /* Wait for invalidation complete */
3896 for (i = 0; i < usec_timeout; i++) {
3897 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3898 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3899 INVALIDATE_CACHE_COMPLETE))
3900 break;
3901 udelay(1);
3902 }
3903
3904 if (i >= usec_timeout) {
3905 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3906 return -EINVAL;
3907 }
3908
3909 return 0;
3910 }
3911
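/*
 * Point the RLC at the KIQ.  The low byte of RLC_CP_SCHEDULERS encodes
 * the queue as me/pipe/queue (me at bit 5, pipe at bit 3, queue in the
 * low bits); the second write then sets bit 7, presumably as a
 * "valid/enable" flag.
 */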
3912 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3913 {
3914 uint32_t tmp;
3915 struct amdgpu_device *adev = ring->adev;
3916
3917 	/* tell RLC which queue is the KIQ */
3918 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
3919 tmp &= 0xffffff00;
3920 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3921 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3922 tmp |= 0x80;
3923 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3924 }
3925
3926 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3927 {
3928 /* set graphics engine doorbell range */
3929 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3930 (adev->doorbell_index.gfx_ring0 * 2) << 2);
3931 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3932 (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3933
3934 /* set compute engine doorbell range */
3935 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3936 (adev->doorbell_index.kiq * 2) << 2);
3937 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3938 (adev->doorbell_index.userqueue_end * 2) << 2);
3939 }
3940
3941 static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
3942 struct v11_gfx_mqd *mqd,
3943 struct amdgpu_mqd_prop *prop)
3944 {
3945 	bool priority = false;
3946 u32 tmp;
3947
3948 /* set up default queue priority level
3949 * 0x0 = low priority, 0x1 = high priority
3950 */
3951 if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
3952 		priority = true;
3953
3954 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3955 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
3956 mqd->cp_gfx_hqd_queue_priority = tmp;
3957 }
3958
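/*
 * Fill in the gfx memory queue descriptor (MQD).  The MQD is the
 * in-memory image of the CP_GFX_HQD_* registers; when the queue is
 * mapped (e.g. via the KIQ/MES) the hardware loads its state from here
 * instead of the driver writing each register directly.
 */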
3959 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3960 struct amdgpu_mqd_prop *prop)
3961 {
3962 struct v11_gfx_mqd *mqd = m;
3963 uint64_t hqd_gpu_addr, wb_gpu_addr;
3964 uint32_t tmp;
3965 uint32_t rb_bufsz;
3966
3967 /* set up gfx hqd wptr */
3968 mqd->cp_gfx_hqd_wptr = 0;
3969 mqd->cp_gfx_hqd_wptr_hi = 0;
3970
3971 /* set the pointer to the MQD */
3972 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3973 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3974
3975 /* set up mqd control */
3976 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3977 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3978 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3979 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3980 mqd->cp_gfx_mqd_control = tmp;
3981
3982 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3983 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3984 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3985 mqd->cp_gfx_hqd_vmid = 0;
3986
3987 /* set up gfx queue priority */
3988 gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
3989
3990 /* set up time quantum */
3991 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3992 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3993 mqd->cp_gfx_hqd_quantum = tmp;
3994
3995 /* set up gfx hqd base. this is similar as CP_RB_BASE */
3996 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3997 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3998 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3999
4000 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
4001 wb_gpu_addr = prop->rptr_gpu_addr;
4002 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
4003 mqd->cp_gfx_hqd_rptr_addr_hi =
4004 upper_32_bits(wb_gpu_addr) & 0xffff;
4005
4006 /* set up rb_wptr_poll addr */
4007 wb_gpu_addr = prop->wptr_gpu_addr;
4008 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4009 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4010
4011 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
4012 rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
4013 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
4014 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
4015 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
4016 #ifdef __BIG_ENDIAN
4017 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
4018 #endif
4019 mqd->cp_gfx_hqd_cntl = tmp;
4020
4021 /* set up cp_doorbell_control */
4022 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
4023 if (prop->use_doorbell) {
4024 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4025 DOORBELL_OFFSET, prop->doorbell_index);
4026 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4027 DOORBELL_EN, 1);
4028 } else
4029 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
4030 DOORBELL_EN, 0);
4031 mqd->cp_rb_doorbell_control = tmp;
4032
4033 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4034 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
4035
4036 	/* activate the queue */
4037 mqd->cp_gfx_hqd_active = 1;
4038
4039 return 0;
4040 }
4041
4042 static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
4043 {
4044 struct amdgpu_device *adev = ring->adev;
4045 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
4046 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
4047
4048 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4049 memset((void *)mqd, 0, sizeof(*mqd));
4050 mutex_lock(&adev->srbm_mutex);
4051 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4052 amdgpu_ring_init_mqd(ring);
4053 soc21_grbm_select(adev, 0, 0, 0, 0);
4054 mutex_unlock(&adev->srbm_mutex);
4055 if (adev->gfx.me.mqd_backup[mqd_idx])
4056 memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4057 } else {
4058 /* restore mqd with the backup copy */
4059 if (adev->gfx.me.mqd_backup[mqd_idx])
4060 memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
4061 /* reset the ring */
4062 ring->wptr = 0;
4063 *ring->wptr_cpu_addr = 0;
4064 amdgpu_ring_clear_ring(ring);
4065 }
4066
4067 return 0;
4068 }
4069
4070 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
4071 {
4072 int r, i;
4073 struct amdgpu_ring *ring;
4074
4075 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4076 ring = &adev->gfx.gfx_ring[i];
4077
4078 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4079 if (unlikely(r != 0))
4080 return r;
4081
4082 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4083 if (!r) {
4084 r = gfx_v11_0_kgq_init_queue(ring, false);
4085 amdgpu_bo_kunmap(ring->mqd_obj);
4086 ring->mqd_ptr = NULL;
4087 }
4088 amdgpu_bo_unreserve(ring->mqd_obj);
4089 if (r)
4090 return r;
4091 }
4092
4093 r = amdgpu_gfx_enable_kgq(adev, 0);
4094 if (r)
4095 return r;
4096
4097 return gfx_v11_0_cp_gfx_start(adev);
4098 }
4099
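/*
 * Fill in a compute MQD.  Same idea as the gfx MQD above: an in-memory
 * image of the CP_HQD_* registers, covering the EOP buffer, doorbell,
 * ring base/size and the static pipe/queue priorities.
 */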
4100 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
4101 struct amdgpu_mqd_prop *prop)
4102 {
4103 struct v11_compute_mqd *mqd = m;
4104 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
4105 uint32_t tmp;
4106
4107 mqd->header = 0xC0310800;
4108 mqd->compute_pipelinestat_enable = 0x00000001;
4109 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4110 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4111 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4112 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4113 mqd->compute_misc_reserved = 0x00000007;
4114
4115 eop_base_addr = prop->eop_gpu_addr >> 8;
4116 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4117 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4118
4119 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4120 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
4121 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4122 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
4123
4124 mqd->cp_hqd_eop_control = tmp;
4125
4126 /* enable doorbell? */
4127 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
4128
4129 if (prop->use_doorbell) {
4130 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4131 DOORBELL_OFFSET, prop->doorbell_index);
4132 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4133 DOORBELL_EN, 1);
4134 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4135 DOORBELL_SOURCE, 0);
4136 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4137 DOORBELL_HIT, 0);
4138 } else {
4139 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4140 DOORBELL_EN, 0);
4141 }
4142
4143 mqd->cp_hqd_pq_doorbell_control = tmp;
4144
4145 /* disable the queue if it's active */
4146 mqd->cp_hqd_dequeue_request = 0;
4147 mqd->cp_hqd_pq_rptr = 0;
4148 mqd->cp_hqd_pq_wptr_lo = 0;
4149 mqd->cp_hqd_pq_wptr_hi = 0;
4150
4151 /* set the pointer to the MQD */
4152 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
4153 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4154
4155 /* set MQD vmid to 0 */
4156 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
4157 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4158 mqd->cp_mqd_control = tmp;
4159
4160 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4161 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4162 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4163 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4164
4165 /* set up the HQD, this is similar to CP_RB0_CNTL */
4166 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
4167 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4168 (order_base_2(prop->queue_size / 4) - 1));
4169 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4170 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4171 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
4172 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
4173 prop->allow_tunneling);
4174 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4175 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4176 mqd->cp_hqd_pq_control = tmp;
4177
4178 /* set the wb address whether it's enabled or not */
4179 wb_gpu_addr = prop->rptr_gpu_addr;
4180 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4181 mqd->cp_hqd_pq_rptr_report_addr_hi =
4182 upper_32_bits(wb_gpu_addr) & 0xffff;
4183
4184 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4185 wb_gpu_addr = prop->wptr_gpu_addr;
4186 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4187 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4188
4189 tmp = 0;
4190 /* enable the doorbell if requested */
4191 if (prop->use_doorbell) {
4192 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
4193 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4194 DOORBELL_OFFSET, prop->doorbell_index);
4195
4196 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4197 DOORBELL_EN, 1);
4198 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4199 DOORBELL_SOURCE, 0);
4200 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4201 DOORBELL_HIT, 0);
4202 }
4203
4204 mqd->cp_hqd_pq_doorbell_control = tmp;
4205
4206 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4207 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
4208
4209 /* set the vmid for the queue */
4210 mqd->cp_hqd_vmid = 0;
4211
4212 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
4213 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
4214 mqd->cp_hqd_persistent_state = tmp;
4215
4216 /* set MIN_IB_AVAIL_SIZE */
4217 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
4218 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4219 mqd->cp_hqd_ib_control = tmp;
4220
4221 /* set static priority for a compute queue/ring */
4222 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
4223 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
4224
4225 mqd->cp_hqd_active = prop->hqd_active;
4226
4227 return 0;
4228 }
4229
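/*
 * Program the KIQ's HQD registers directly via MMIO (callers select the
 * target me/pipe/queue via GRBM first).  Unlike regular compute queues,
 * the KIQ cannot be mapped by itself, so its MQD contents have to be
 * written to the hardware by the CPU.
 */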
4230 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
4231 {
4232 struct amdgpu_device *adev = ring->adev;
4233 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4234 int j;
4235
4236 /* inactivate the queue */
4237 if (amdgpu_sriov_vf(adev))
4238 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
4239
4240 /* disable wptr polling */
4241 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4242
4243 /* write the EOP addr */
4244 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
4245 mqd->cp_hqd_eop_base_addr_lo);
4246 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
4247 mqd->cp_hqd_eop_base_addr_hi);
4248
4249 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4250 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
4251 mqd->cp_hqd_eop_control);
4252
4253 /* enable doorbell? */
4254 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4255 mqd->cp_hqd_pq_doorbell_control);
4256
4257 /* disable the queue if it's active */
4258 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
4259 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
4260 for (j = 0; j < adev->usec_timeout; j++) {
4261 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
4262 break;
4263 udelay(1);
4264 }
4265 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
4266 mqd->cp_hqd_dequeue_request);
4267 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
4268 mqd->cp_hqd_pq_rptr);
4269 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4270 mqd->cp_hqd_pq_wptr_lo);
4271 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4272 mqd->cp_hqd_pq_wptr_hi);
4273 }
4274
4275 /* set the pointer to the MQD */
4276 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
4277 mqd->cp_mqd_base_addr_lo);
4278 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
4279 mqd->cp_mqd_base_addr_hi);
4280
4281 /* set MQD vmid to 0 */
4282 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
4283 mqd->cp_mqd_control);
4284
4285 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4286 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
4287 mqd->cp_hqd_pq_base_lo);
4288 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
4289 mqd->cp_hqd_pq_base_hi);
4290
4291 /* set up the HQD, this is similar to CP_RB0_CNTL */
4292 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
4293 mqd->cp_hqd_pq_control);
4294
4295 /* set the wb address whether it's enabled or not */
4296 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
4297 mqd->cp_hqd_pq_rptr_report_addr_lo);
4298 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4299 mqd->cp_hqd_pq_rptr_report_addr_hi);
4300
4301 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4302 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
4303 mqd->cp_hqd_pq_wptr_poll_addr_lo);
4304 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
4305 mqd->cp_hqd_pq_wptr_poll_addr_hi);
4306
4307 /* enable the doorbell if requested */
4308 if (ring->use_doorbell) {
4309 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4310 (adev->doorbell_index.kiq * 2) << 2);
4311 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
4312 (adev->doorbell_index.userqueue_end * 2) << 2);
4313 }
4314
4315 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4316 mqd->cp_hqd_pq_doorbell_control);
4317
4318 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4319 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4320 mqd->cp_hqd_pq_wptr_lo);
4321 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4322 mqd->cp_hqd_pq_wptr_hi);
4323
4324 /* set the vmid for the queue */
4325 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
4326
4327 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
4328 mqd->cp_hqd_persistent_state);
4329
4330 /* activate the queue */
4331 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4332 mqd->cp_hqd_active);
4333
4334 if (ring->use_doorbell)
4335 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4336
4337 return 0;
4338 }
4339
4340 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
4341 {
4342 struct amdgpu_device *adev = ring->adev;
4343 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4344
4345 gfx_v11_0_kiq_setting(ring);
4346
4347 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4348 /* reset MQD to a clean status */
4349 if (adev->gfx.kiq[0].mqd_backup)
4350 memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
4351
4352 /* reset ring buffer */
4353 ring->wptr = 0;
4354 amdgpu_ring_clear_ring(ring);
4355
4356 mutex_lock(&adev->srbm_mutex);
4357 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4358 gfx_v11_0_kiq_init_register(ring);
4359 soc21_grbm_select(adev, 0, 0, 0, 0);
4360 mutex_unlock(&adev->srbm_mutex);
4361 } else {
4362 memset((void *)mqd, 0, sizeof(*mqd));
4363 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
4364 amdgpu_ring_clear_ring(ring);
4365 mutex_lock(&adev->srbm_mutex);
4366 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4367 amdgpu_ring_init_mqd(ring);
4368 gfx_v11_0_kiq_init_register(ring);
4369 soc21_grbm_select(adev, 0, 0, 0, 0);
4370 mutex_unlock(&adev->srbm_mutex);
4371
4372 if (adev->gfx.kiq[0].mqd_backup)
4373 memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
4374 }
4375
4376 return 0;
4377 }
4378
4379 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
4380 {
4381 struct amdgpu_device *adev = ring->adev;
4382 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4383 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4384
4385 if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
4386 memset((void *)mqd, 0, sizeof(*mqd));
4387 mutex_lock(&adev->srbm_mutex);
4388 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4389 amdgpu_ring_init_mqd(ring);
4390 soc21_grbm_select(adev, 0, 0, 0, 0);
4391 mutex_unlock(&adev->srbm_mutex);
4392
4393 if (adev->gfx.mec.mqd_backup[mqd_idx])
4394 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4395 } else {
4396 /* restore MQD to a clean status */
4397 if (adev->gfx.mec.mqd_backup[mqd_idx])
4398 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4399 /* reset ring buffer */
4400 ring->wptr = 0;
4401 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
4402 amdgpu_ring_clear_ring(ring);
4403 }
4404
4405 return 0;
4406 }
4407
4408 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4409 {
4410 struct amdgpu_ring *ring;
4411 int r;
4412
4413 ring = &adev->gfx.kiq[0].ring;
4414
4415 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4416 if (unlikely(r != 0))
4417 return r;
4418
4419 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4420 if (unlikely(r != 0)) {
4421 amdgpu_bo_unreserve(ring->mqd_obj);
4422 return r;
4423 }
4424
4425 gfx_v11_0_kiq_init_queue(ring);
4426 amdgpu_bo_kunmap(ring->mqd_obj);
4427 ring->mqd_ptr = NULL;
4428 amdgpu_bo_unreserve(ring->mqd_obj);
4429 ring->sched.ready = true;
4430 return 0;
4431 }
4432
4433 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4434 {
4435 struct amdgpu_ring *ring = NULL;
4436 int r = 0, i;
4437
4438 if (!amdgpu_async_gfx_ring)
4439 gfx_v11_0_cp_compute_enable(adev, true);
4440
4441 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4442 ring = &adev->gfx.compute_ring[i];
4443
4444 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4445 if (unlikely(r != 0))
4446 goto done;
4447 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4448 if (!r) {
4449 r = gfx_v11_0_kcq_init_queue(ring, false);
4450 amdgpu_bo_kunmap(ring->mqd_obj);
4451 ring->mqd_ptr = NULL;
4452 }
4453 amdgpu_bo_unreserve(ring->mqd_obj);
4454 if (r)
4455 goto done;
4456 }
4457
4458 r = amdgpu_gfx_enable_kcq(adev, 0);
4459 done:
4460 return r;
4461 }
4462
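/*
 * Top-level CP bring-up: optionally front-door load the gfx/compute
 * microcode, set the doorbell apertures, resume the KIQ (or the MES
 * KIQ), then the compute queues and the gfx rings, and finally
 * ring-test everything that was brought up.
 */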
4463 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4464 {
4465 int r, i;
4466 struct amdgpu_ring *ring;
4467
4468 if (!(adev->flags & AMD_IS_APU))
4469 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4470
4471 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4472 /* legacy firmware loading */
4473 r = gfx_v11_0_cp_gfx_load_microcode(adev);
4474 if (r)
4475 return r;
4476
4477 if (adev->gfx.rs64_enable)
4478 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
4479 else
4480 r = gfx_v11_0_cp_compute_load_microcode(adev);
4481 if (r)
4482 return r;
4483 }
4484
4485 gfx_v11_0_cp_set_doorbell_range(adev);
4486
4487 if (amdgpu_async_gfx_ring) {
4488 gfx_v11_0_cp_compute_enable(adev, true);
4489 gfx_v11_0_cp_gfx_enable(adev, true);
4490 }
4491
4492 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4493 r = amdgpu_mes_kiq_hw_init(adev);
4494 else
4495 r = gfx_v11_0_kiq_resume(adev);
4496 if (r)
4497 return r;
4498
4499 r = gfx_v11_0_kcq_resume(adev);
4500 if (r)
4501 return r;
4502
4503 if (!amdgpu_async_gfx_ring) {
4504 r = gfx_v11_0_cp_gfx_resume(adev);
4505 if (r)
4506 return r;
4507 } else {
4508 r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
4509 if (r)
4510 return r;
4511 }
4512
4513 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4514 ring = &adev->gfx.gfx_ring[i];
4515 r = amdgpu_ring_test_helper(ring);
4516 if (r)
4517 return r;
4518 }
4519
4520 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4521 ring = &adev->gfx.compute_ring[i];
4522 r = amdgpu_ring_test_helper(ring);
4523 if (r)
4524 return r;
4525 }
4526
4527 return 0;
4528 }
4529
4530 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4531 {
4532 gfx_v11_0_cp_gfx_enable(adev, enable);
4533 gfx_v11_0_cp_compute_enable(adev, enable);
4534 }
4535
4536 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4537 {
4538 int r;
4539 bool value;
4540
4541 r = adev->gfxhub.funcs->gart_enable(adev);
4542 if (r)
4543 return r;
4544
4545 adev->hdp.funcs->flush_hdp(adev, NULL);
4546
4547 	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
4549
4550 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4551 	/* TODO: investigate why this and the hdp flush above are needed;
4552 	 * are we missing a flush somewhere else? */
4553 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4554
4555 return 0;
4556 }
4557
4558 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4559 {
4560 u32 tmp;
4561
4562 /* select RS64 */
4563 if (adev->gfx.rs64_enable) {
4564 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4565 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4566 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4567
4568 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4569 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4570 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
4571 }
4572
4573 if (amdgpu_emu_mode == 1)
4574 msleep(100);
4575 }
4576
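/*
 * Cache the GB_ADDR_CONFIG layout.  Each field read below is stored as
 * a power of two (hence the 1 << ...), e.g. NUM_PIPES == 3 means 8
 * pipes; PIPE_INTERLEAVE_SIZE starts at 256 bytes (1 << (8 + n)).
 */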
4577 static int get_gb_addr_config(struct amdgpu_device *adev)
4578 {
4579 u32 gb_addr_config;
4580
4581 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
4582 if (gb_addr_config == 0)
4583 return -EINVAL;
4584
4585 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4586 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4587
4588 adev->gfx.config.gb_addr_config = gb_addr_config;
4589
4590 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4591 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4592 GB_ADDR_CONFIG, NUM_PIPES);
4593
4594 adev->gfx.config.max_tile_pipes =
4595 adev->gfx.config.gb_addr_config_fields.num_pipes;
4596
4597 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4598 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4599 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4600 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4601 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4602 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4603 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4604 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4605 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4606 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4607 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4608 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
4609
4610 return 0;
4611 }
4612
4613 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4614 {
4615 uint32_t data;
4616
4617 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4618 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4619 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4620
4621 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4622 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4623 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4624 }
4625
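/*
 * gfx hw_init ordering: run the RLC backdoor autoload (or the IMU
 * front-door load), wait for the autoload to complete, enable the
 * gfxhub/GART, apply golden registers and constants, resume the RLC
 * and finally the CP.  SMU firmware is loaded before the RLC where the
 * load type requires it.
 */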
4626 static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
4627 {
4628 int r;
4629 struct amdgpu_device *adev = ip_block->adev;
4630
4631 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
4632 adev->gfx.cleaner_shader_ptr);
4633
4634 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4635 if (adev->gfx.imu.funcs) {
4636 /* RLC autoload sequence 1: Program rlc ram */
4637 if (adev->gfx.imu.funcs->program_rlc_ram)
4638 adev->gfx.imu.funcs->program_rlc_ram(adev);
4639 /* rlc autoload firmware */
4640 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4641 if (r)
4642 return r;
4643 }
4644 } else {
4645 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4646 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4647 if (adev->gfx.imu.funcs->load_microcode)
4648 adev->gfx.imu.funcs->load_microcode(adev);
4649 if (adev->gfx.imu.funcs->setup_imu)
4650 adev->gfx.imu.funcs->setup_imu(adev);
4651 if (adev->gfx.imu.funcs->start_imu)
4652 adev->gfx.imu.funcs->start_imu(adev);
4653 }
4654
4655 /* disable gpa mode in backdoor loading */
4656 gfx_v11_0_disable_gpa_mode(adev);
4657 }
4658 }
4659
4660 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4661 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4662 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
4663 if (r) {
4664 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
4665 return r;
4666 }
4667 }
4668
4669 adev->gfx.is_poweron = true;
4670
4671 if(get_gb_addr_config(adev))
4672 DRM_WARN("Invalid gb_addr_config !\n");
4673
4674 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4675 adev->gfx.rs64_enable)
4676 gfx_v11_0_config_gfx_rs64(adev);
4677
4678 r = gfx_v11_0_gfxhub_enable(adev);
4679 if (r)
4680 return r;
4681
4682 if (!amdgpu_emu_mode)
4683 gfx_v11_0_init_golden_registers(adev);
4684
4685 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4686 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
4687 /**
4688 * For gfx 11, rlc firmware loading relies on smu firmware is
4689 * loaded firstly, so in direct type, it has to load smc ucode
4690 * here before rlc.
4691 */
4692 r = amdgpu_pm_load_smu_firmware(adev, NULL);
4693 if (r)
4694 return r;
4695 }
4696
4697 gfx_v11_0_constants_init(adev);
4698
4699 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4700 gfx_v11_0_select_cp_fw_arch(adev);
4701
4702 if (adev->nbio.funcs->gc_doorbell_init)
4703 adev->nbio.funcs->gc_doorbell_init(adev);
4704
4705 r = gfx_v11_0_rlc_resume(adev);
4706 if (r)
4707 return r;
4708
4709 /*
4710 * init golden registers and rlc resume may override some registers,
4711 * reconfig them here
4712 */
4713 gfx_v11_0_tcp_harvest(adev);
4714
4715 r = gfx_v11_0_cp_resume(adev);
4716 if (r)
4717 return r;
4718
4719 /* get IMU version from HW if it's not set */
4720 if (!adev->gfx.imu_fw_version)
4721 adev->gfx.imu_fw_version = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_0);
4722
4723 return r;
4724 }

static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	if (!adev->no_hw_access) {
		if (amdgpu_async_gfx_ring) {
			if (amdgpu_gfx_disable_kgq(adev, 0))
				DRM_ERROR("KGQ disable failed\n");
		}

		if (amdgpu_gfx_disable_kcq(adev, 0))
			DRM_ERROR("KCQ disable failed\n");

		amdgpu_mes_kiq_hw_fini(adev);
	}

	if (amdgpu_sriov_vf(adev))
		/* Remove the steps disabling CPG and clearing KIQ position,
		 * so that CP could perform IDLE-SAVE during switch. Those
		 * steps are necessary to avoid a DMAR error in gfx9 but it is
		 * not reproduced on gfx11.
		 */
		return 0;

	gfx_v11_0_cp_enable(adev, false);
	gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	adev->gfxhub.funcs->gart_disable(adev);

	adev->gfx.is_poweron = false;

	return 0;
}

static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return gfx_v11_0_hw_fini(ip_block);
}

static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	return gfx_v11_0_hw_init(ip_block);
}

static bool gfx_v11_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
			  GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
				      bool req)
{
	u32 i, tmp, val;

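	/*
	 * The mutex is requested by writing REQUEST together with our
	 * CLIENTID and reading the register back: on acquire, the readback
	 * only matches what we wrote once the mutex has actually been
	 * granted to us; on release, the readback stops matching a pending
	 * request once the mutex is free again (or held by firmware).
	 */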
	for (i = 0; i < adev->usec_timeout; i++) {
		/* Request with MeId=2, PipeId=0 */
		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);

		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
		if (req) {
			if (val == tmp)
				break;
		} else {
			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
					    REQUEST, 1);

			/* unlocked or locked by firmware */
			if (val != tmp)
				break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		return -EINVAL;

	return 0;
}

static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	int r, i, j, k;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
			}
		}
	}
	for (i = 0; i < adev->gfx.me.num_me; ++i) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				soc21_grbm_select(adev, i, k, j, 0);

				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
			}
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
	mutex_lock(&adev->gfx.reset_sem_mutex);
	r = gfx_v11_0_request_gfx_index_mutex(adev, true);
	if (r) {
		mutex_unlock(&adev->gfx.reset_sem_mutex);
		DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
		return r;
	}

	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);

	/* Read the CP_VMID_RESET register back three times to give
	 * GFX_HQD_ACTIVE sufficient time to reach 0.
	 */
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);

	/* release the gfx mutex */
	r = gfx_v11_0_request_gfx_index_mutex(adev, false);
	mutex_unlock(&adev->gfx.reset_sem_mutex);
	if (r) {
		DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
		return r;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		printk("Failed to wait for all pipes to become clean\n");
		return -EINVAL;
	}

	/********** trigger soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 1);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
	/********** exit soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 0);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);

	tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
	WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);

	WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		printk("Failed to wait for CP_VMID_RESET to clear\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return gfx_v11_0_cp_resume(adev);
}

static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	int i, r;
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	long tmo = msecs_to_jiffies(1000);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	return false;
}

static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * A GFX soft reset also impacts MES, so MES needs to be resumed
	 * after a GFX soft reset.
	 */
	return amdgpu_mes_resume(adev);
}

static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;
	uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;

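	/*
	 * The 64-bit counter is read as two 32-bit halves. Read HI before
	 * and after LO; if HI changed in between, LO wrapped around during
	 * the read, so fetch LO once more to get a consistent value.
	 */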
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_gfx_off_ctrl(adev, false);
		mutex_lock(&adev->gfx.gpu_clock_mutex);
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
		mutex_unlock(&adev->gfx.gpu_clock_mutex);
		amdgpu_gfx_off_ctrl(adev, true);
	} else {
		preempt_disable();
		clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
		if (clock_counter_hi_pre != clock_counter_hi_after)
			clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
		preempt_enable();
	}
	clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);

	return clock;
}

static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

	/* GDS Size */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

	/* GWS */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
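	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of
	 * oa_size contiguous bits starting at bit oa_base.
	 */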
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);

	gfx_v11_0_set_kiq_pm4_funcs(adev);
	gfx_v11_0_set_ring_funcs(adev);
	gfx_v11_0_set_irq_funcs(adev);
	gfx_v11_0_set_gds_init(adev);
	gfx_v11_0_set_rlc_funcs(adev);
	gfx_v11_0_set_mqd_funcs(adev);
	gfx_v11_0_set_imu_funcs(adev);

	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);

	return gfx_v11_0_init_microcode(adev);
}

static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
	if (r)
		return r;

	return 0;
}

static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_cntl;

	/* if RLC is not enabled, do nothing */
	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}

static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

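	/*
	 * Request safe mode with CMD set and MESSAGE = 1 (enter); the RLC
	 * clears the CMD field once the request has been acknowledged.
	 */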
	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);

	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
				   RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
}

static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, def;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
		return;

	/* It is disabled by HW by default */
	if (enable) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	} else {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	}
}

static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags &
	      (AMD_CG_SUPPORT_GFX_CGCG |
	       AMD_CG_SUPPORT_GFX_CGLS |
	       AMD_CG_SUPPORT_GFX_3D_CGCG |
	       AMD_CG_SUPPORT_GFX_3D_CGLS)))
		return;

	if (enable) {
		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

		/* unset CGCG override */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
		    adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;

		/* update CGCG override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
			data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
			data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
			data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);

		data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
		data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);

		if (def != data)
			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);

		data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
		data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, so there is no
		 * need to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	} else {
		/* Program RLC_CGCG_CGLS_CTRL */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);

		/* Program RLC_CGCG_CGLS_CTRL_3D */
		def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);

		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);

		/* Some ASICs only have one SDMA instance, so there is no
		 * need to configure SDMA1.
		 */
		if (adev->sdma.num_instances > 1) {
			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
		}
	}
}

static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);

	gfx_v11_0_update_medium_grain_clock_gating(adev, enable);

	gfx_v11_0_update_repeater_fgcg(adev, enable);

	gfx_v11_0_update_sram_fgcg(adev, enable);

	gfx_v11_0_update_perf_clk(adev, enable);

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_CGLS |
	     AMD_CG_SUPPORT_GFX_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGLS))
		gfx_v11_0_enable_gui_idle_interrupt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}

static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
{
	u32 reg, pre_data, data;

	amdgpu_gfx_off_ctrl(adev, false);
	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
			WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
		else
			WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
	}
	amdgpu_gfx_off_ctrl(adev, true);

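	/*
	 * For SR-IOV in one-VF mode, additionally emit the register write on
	 * the gfx/compute ring so the updated SPM VMID is also applied from
	 * the command stream.
	 */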
	if (ring &&
	    amdgpu_sriov_is_pp_one_vf(adev) &&
	    (pre_data != data) &&
	    ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) ||
	     (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) {
		amdgpu_ring_emit_wreg(ring, reg, data);
	}
}

static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
	.set_safe_mode = gfx_v11_0_set_safe_mode,
	.unset_safe_mode = gfx_v11_0_unset_safe_mode,
	.init = gfx_v11_0_rlc_init,
	.get_csb_size = gfx_v11_0_get_csb_size,
	.get_csb_buffer = gfx_v11_0_get_csb_buffer,
	.resume = gfx_v11_0_rlc_resume,
	.stop = gfx_v11_0_rlc_stop,
	.reset = gfx_v11_0_rlc_reset,
	.start = gfx_v11_0_rlc_start,
	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};

static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;

	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);

	/* Program RLC_PG_DELAY3 for CGPG hysteresis */
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 5, 1):
		case IP_VERSION(11, 5, 2):
			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
			break;
		default:
			break;
		}
	}
}

static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	gfx_v11_cntl_power_gating(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static int gfx_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		if (!enable)
			amdgpu_gfx_off_ctrl(adev, false);

		gfx_v11_cntl_pg(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);

		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		gfx_v11_0_update_gfx_clock_gating(adev,
						  state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_REPEATER_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;

	/* AMD_CG_SUPPORT_GFX_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_FGCG;

	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	/* the gfx11 rptr is 32 bit */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	} else {
		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
			     lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
			     upper_32_bits(ring->wptr));
	}
}

static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	/* the gfx11 hardware rptr is 32 bit */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx11 now */
	}
}

static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

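	/*
	 * Pick the per-engine HDP flush request/done bit: compute rings on
	 * MEC1/MEC2 use the cp2/cp6 bit groups shifted by pipe, while the
	 * gfx ring uses cp0 and polls through the PFP.
	 */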
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (flags & AMDGPU_IB_PREEMPTED)
			control |= INDIRECT_BUFFER_PRE_RESUME(1);

		if (vmid)
			gfx_v11_0_ring_emit_de_meta(ring,
				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
	}

	if (ring->is_mes_queue)
		/* inherit vmid from mqd */
		control |= 0x400000;

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib,
					   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	if (ring->is_mes_queue)
		/* inherit vmid from mqd */
		control |= 0x40000000;

	/* Currently, there is a high likelihood of getting a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
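	/* DATA_SEL 1 = write the lower 32 bits of seq, 2 = write all 64 bits;
	 * INT_SEL 2 = raise an interrupt once the write has been confirmed.
	 */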
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

	/*
	 * The address should be Qword aligned for a 64-bit write, and Dword
	 * aligned when only the low 32 bits of data are sent (data high is
	 * discarded).
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, ring->is_mes_queue ?
			  (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
}

static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

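	/* wait (WAIT_REG_MEM on memory) until the fence value at addr
	 * reaches the latest synced sequence number
	 */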
	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}

	/* Make sure that we can't skip the SET_Q_MODE packets when the VM
	 * changed in any way.
	 */
	ring->set_q_mode_offs = 0;
	ring->set_q_mode_ptr = NULL;
}

static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						   uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr==0 */
	amdgpu_ring_write(ring, 0);
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);

	return ret;
}

static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
					   u64 shadow_va, u64 csa_va,
					   u64 gds_va, bool init_shadow,
					   int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int offs, end;

	if (!adev->gfx.cp_gfx_shadow || !ring->ring_obj)
		return;

	/*
	 * The logic here isn't easy to understand because we need to keep
	 * state across multiple executions of the function as well as
	 * between the CPU and GPU. The general idea is that the newly
	 * written GPU command has a condition on the previous one and is
	 * only executed if really necessary.
	 */

	/*
	 * The dw in the NOP controls if the next SET_Q_MODE packet should be
	 * executed or not. Reserve 64 bits just to be on the safe side.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 1));
	offs = ring->wptr & ring->buf_mask;

	/*
	 * We start with skipping the prefix SET_Q_MODE and always executing
	 * the postfix SET_Q_MODE packet. This is changed below with a
	 * WRITE_DATA command when the postfix is executed.
	 */
	amdgpu_ring_write(ring, shadow_va ? 1 : 0);
	amdgpu_ring_write(ring, 0);

	if (ring->set_q_mode_offs) {
		uint64_t addr;

		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
		addr += ring->set_q_mode_offs << 2;
		end = gfx_v11_0_ring_emit_init_cond_exec(ring, addr);
	}

	/*
	 * When the postfix SET_Q_MODE packet executes we need to make sure
	 * that the next prefix SET_Q_MODE packet executes as well.
	 */
	if (!shadow_va) {
		uint64_t addr;

		addr = amdgpu_bo_gpu_offset(ring->ring_obj);
		addr += offs << 2;
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, 0x1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
	amdgpu_ring_write(ring, lower_32_bits(shadow_va));
	amdgpu_ring_write(ring, upper_32_bits(shadow_va));
	amdgpu_ring_write(ring, lower_32_bits(gds_va));
	amdgpu_ring_write(ring, upper_32_bits(gds_va));
	amdgpu_ring_write(ring, lower_32_bits(csa_va));
	amdgpu_ring_write(ring, upper_32_bits(csa_va));
	amdgpu_ring_write(ring, shadow_va ?
			  PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
	amdgpu_ring_write(ring, init_shadow ?
			  PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);

	if (ring->set_q_mode_offs)
		amdgpu_ring_patch_cond_exec(ring, end);

	if (shadow_va) {
		uint64_t token = shadow_va ^ csa_va ^ gds_va ^ vmid;

		/*
		 * If the tokens match try to skip the last postfix SET_Q_MODE
		 * packet to avoid saving/restoring the state all the time.
		 */
		if (ring->set_q_mode_ptr && ring->set_q_mode_token == token)
			*ring->set_q_mode_ptr = 0;

		ring->set_q_mode_token = token;
	} else {
		ring->set_q_mode_ptr = &ring->ring[ring->set_q_mode_offs];
	}

	ring->set_q_mode_offs = offs;
}

static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;

	if (adev->enable_mes)
		return -EINVAL;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* assert IB preemption, emit the trailing fence */
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   ring->trail_fence_gpu_addr,
				   ++ring->trail_seq);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
	}

	/* deassert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_de_ib_state de_payload = {0};
	uint64_t offset, gds_addr, de_payload_gpu_addr;
	void *de_payload_cpu_addr;
	int cnt;

	if (ring->is_mes_queue) {
		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gfx_meta_data) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		de_payload_cpu_addr =
			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gds_backup) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		offset = offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;

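		/* place the GDS backup at the page-aligned tail of the CSA */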
		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
				 PAGE_SIZE);
	}

	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

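	/* WRITE_DATA dword count: 3 control/address dwords plus the payload,
	 * minus one, since the PM4 count field encodes (dwords after the
	 * header - 1); hence (sizeof(de_payload) >> 2) + 4 - 2 below.
	 */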
6061 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
6062 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
6063 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
6064 WRITE_DATA_DST_SEL(8) |
6065 WR_CONFIRM) |
6066 WRITE_DATA_CACHE_POLICY(0));
6067 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
6068 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
6069
6070 if (resume)
6071 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
6072 sizeof(de_payload) >> 2);
6073 else
6074 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
6075 sizeof(de_payload) >> 2);
6076 }
6077
gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring * ring,bool start,bool secure)6078 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
6079 bool secure)
6080 {
6081 uint32_t v = secure ? FRAME_TMZ : 0;
6082
6083 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
6084 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
6085 }
6086
gfx_v11_0_ring_emit_rreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t reg_val_offs)6087 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6088 uint32_t reg_val_offs)
6089 {
6090 struct amdgpu_device *adev = ring->adev;
6091
6092 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6093 amdgpu_ring_write(ring, 0 | /* src: register*/
6094 (5 << 8) | /* dst: memory */
6095 (1 << 20)); /* write confirm */
6096 amdgpu_ring_write(ring, reg);
6097 amdgpu_ring_write(ring, 0);
6098 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6099 reg_val_offs * 4));
6100 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6101 reg_val_offs * 4));
6102 }
6103
gfx_v11_0_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)6104 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6105 uint32_t val)
6106 {
6107 uint32_t cmd = 0;
6108
6109 switch (ring->funcs->type) {
6110 case AMDGPU_RING_TYPE_GFX:
6111 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6112 break;
6113 case AMDGPU_RING_TYPE_KIQ:
6114 cmd = (1 << 16); /* no inc addr */
6115 break;
6116 default:
6117 cmd = WR_CONFIRM;
6118 break;
6119 }
6120 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6121 amdgpu_ring_write(ring, cmd);
6122 amdgpu_ring_write(ring, reg);
6123 amdgpu_ring_write(ring, 0);
6124 amdgpu_ring_write(ring, val);
6125 }
6126
gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)6127 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
6128 uint32_t val, uint32_t mask)
6129 {
6130 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
6131 }
6132
gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring * ring,uint32_t reg0,uint32_t reg1,uint32_t ref,uint32_t mask)6133 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
6134 uint32_t reg0, uint32_t reg1,
6135 uint32_t ref, uint32_t mask)
6136 {
6137 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
6138
6139 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
6140 ref, mask, 0x20);
6141 }
6142
gfx_v11_0_ring_soft_recovery(struct amdgpu_ring * ring,unsigned vmid)6143 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
6144 unsigned vmid)
6145 {
6146 struct amdgpu_device *adev = ring->adev;
6147 uint32_t value = 0;
6148
6149 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6150 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6151 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6152 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6153 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
6154 WREG32_SOC15(GC, 0, regSQ_CMD, value);
6155 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
6156 }
6157
6158 static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device * adev,uint32_t me,uint32_t pipe,enum amdgpu_interrupt_state state)6159 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6160 uint32_t me, uint32_t pipe,
6161 enum amdgpu_interrupt_state state)
6162 {
6163 uint32_t cp_int_cntl, cp_int_cntl_reg;
6164
6165 if (!me) {
6166 switch (pipe) {
6167 case 0:
6168 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
6169 break;
6170 case 1:
6171 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
6172 break;
6173 default:
6174 DRM_DEBUG("invalid pipe %d\n", pipe);
6175 return;
6176 }
6177 } else {
6178 DRM_DEBUG("invalid me %d\n", me);
6179 return;
6180 }
6181
6182 switch (state) {
6183 case AMDGPU_IRQ_STATE_DISABLE:
6184 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6185 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6186 TIME_STAMP_INT_ENABLE, 0);
6187 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6188 GENERIC0_INT_ENABLE, 0);
6189 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6190 break;
6191 case AMDGPU_IRQ_STATE_ENABLE:
6192 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
6193 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6194 TIME_STAMP_INT_ENABLE, 1);
6195 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
6196 GENERIC0_INT_ENABLE, 1);
6197 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
6198 break;
6199 default:
6200 break;
6201 }
6202 }
6203
gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device * adev,int me,int pipe,enum amdgpu_interrupt_state state)6204 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6205 int me, int pipe,
6206 enum amdgpu_interrupt_state state)
6207 {
6208 u32 mec_int_cntl, mec_int_cntl_reg;
6209
6210 /*
6211 * amdgpu controls only the first MEC. That's why this function only
6212 * handles the setting of interrupts for this specific MEC. All other
6213 * pipes' interrupts are set by amdkfd.
6214 */
6215
6216 if (me == 1) {
6217 switch (pipe) {
6218 case 0:
6219 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6220 break;
6221 case 1:
6222 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
6223 break;
6224 case 2:
6225 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
6226 break;
6227 case 3:
6228 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
6229 break;
6230 default:
6231 DRM_DEBUG("invalid pipe %d\n", pipe);
6232 return;
6233 }
6234 } else {
6235 DRM_DEBUG("invalid me %d\n", me);
6236 return;
6237 }
6238
6239 switch (state) {
6240 case AMDGPU_IRQ_STATE_DISABLE:
6241 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6242 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6243 TIME_STAMP_INT_ENABLE, 0);
6244 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6245 GENERIC0_INT_ENABLE, 0);
6246 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6247 break;
6248 case AMDGPU_IRQ_STATE_ENABLE:
6249 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
6250 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6251 TIME_STAMP_INT_ENABLE, 1);
6252 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
6253 GENERIC0_INT_ENABLE, 1);
6254 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
6255 break;
6256 default:
6257 break;
6258 }
6259 }
6260
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

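/*
 * CP EOP interrupt handler. MES-managed queues are looked up by the
 * queue id carried in the fence payload; for legacy queues the target
 * ring is decoded from entry->ring_id:
 *   bits [1:0] = pipe, bits [3:2] = me, bits [6:4] = queue
 * e.g. ring_id 0x15 -> me 1, pipe 1, queue 1.
 */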
static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}

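/*
 * Toggle PRIV_REG_INT_ENABLE on every gfx (CPG) and compute (CPC) pipe
 * so privileged register violations raise an interrupt. The bad-opcode
 * handler below follows the same pattern with OPCODE_ERROR_INT_ENABLE;
 * the privileged-instruction handler only touches the gfx pipes.
 */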
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    PRIV_REG_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				/* MECs start at 1 */
				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
								    PRIV_REG_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_bad_op_fault_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    OPCODE_ERROR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
				/* MECs start at 1 */
				cp_int_cntl_reg = gfx_v11_0_get_cpc_int_cntl(adev, i + 1, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
								    OPCODE_ERROR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int type,
					       enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl_reg, cp_int_cntl;
	int i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < adev->gfx.me.num_me; i++) {
			for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
				cp_int_cntl_reg = gfx_v11_0_get_cpg_int_cntl(adev, i, j);

				if (cp_int_cntl_reg) {
					cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
					cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
								    PRIV_INSTR_INT_ENABLE,
								    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
					WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

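/*
 * Common fault path for the privileged register/instruction and
 * bad-opcode interrupts: decode me/pipe/queue from the IV entry and
 * report the fault to the DRM scheduler of the matching ring, so the
 * offending job goes through the scheduler's timeout/reset handling.
 */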
static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_bad_op_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal opcode in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return 0;
}

#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* the KIQ only supports GENERIC2_INT for now */
		break;
	}
	return 0;
}
#endif

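/*
 * Emit an ACQUIRE_MEM packet covering the full address range with a
 * GCR request that writes back and invalidates GL2/GLM and invalidates
 * GL1/GLV/GLK/GLI, i.e. a full GPU cache flush for the ring's
 * emit_mem_sync hook.
 */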
static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0);          /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);   /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);          /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);          /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl);   /* GCR_CNTL */
}

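/*
 * Per-queue reset of a kernel gfx queue: have the MES reset the legacy
 * queue, re-initialize its MQD, remap the queue through the MES, and
 * finish with a ring test to confirm the queue came back alive.
 * Not supported under SR-IOV.
 */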
static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
	if (r)
		return r;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		dev_err(adev->dev, "failed to reserve mqd_obj\n");
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v11_0_kgq_init_queue(ring, true);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r) {
		dev_err(adev->dev, "failed to map or init kgq mqd\n");
		return r;
	}

	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kgq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

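/*
 * Per-queue reset of a kernel compute queue. Unlike the gfx variant,
 * this dequeues the HQD directly through SRBM-indexed registers (under
 * RLC safe mode and srbm_mutex), then re-initializes the MQD and
 * remaps the queue via the MES.
 */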
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int i, r = 0;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
	WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);

	/* make sure dequeue is complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	if (r) {
		dev_err(adev->dev, "failed to wait for hqd deactivation\n");
		return r;
	}

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		dev_err(adev->dev, "failed to reserve mqd_obj\n");
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v11_0_kcq_init_queue(ring, true);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r) {
		dev_err(adev->dev, "failed to map or init kcq mqd\n");
		return r;
	}
	r = amdgpu_mes_map_legacy_queue(adev, ring);
	if (r) {
		dev_err(adev->dev, "failed to remap kcq\n");
		return r;
	}

	return amdgpu_ring_test_ring(ring);
}

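/*
 * Print the register state captured by gfx_v11_ip_dump(): the core GC
 * registers first, then the per-queue compute and gfx register blocks
 * for every mec/me, pipe and queue instance.
 */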
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	for (i = 0; i < reg_count; i++)
		drm_printf(p, "%-50s \t 0x%08x\n",
			   gc_reg_list_11_0[i].reg_name,
			   adev->gfx.ip_dump_core[i]);

	/* print compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.mec.num_mec,
		   adev->gfx.mec.num_pipe_per_mec,
		   adev->gfx.mec.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_cp_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_compute_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}

	/* print gfx queue registers for all instances */
	if (!adev->gfx.ip_dump_gfx_queues)
		return;

	index = 0;
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
	drm_printf(p, "\nnum_me: %d num_pipe: %d num_queue: %d\n",
		   adev->gfx.me.num_me,
		   adev->gfx.me.num_pipe_per_me,
		   adev->gfx.me.num_queue_per_pipe);

	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
				drm_printf(p, "\nme %d, pipe %d, queue %d\n", i, j, k);
				for (reg = 0; reg < reg_count; reg++) {
					drm_printf(p, "%-50s \t 0x%08x\n",
						   gc_gfx_queue_reg_list_11[reg].reg_name,
						   adev->gfx.ip_dump_gfx_queues[index + reg]);
				}
				index += reg_count;
			}
		}
	}
}

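/*
 * Capture GC register state for a later gfx_v11_ip_print(). GFXOFF is
 * disabled around the reads so the registers are accessible, and the
 * per-queue registers are read under srbm_mutex with the GRBM selector
 * pointed at each mec/me, pipe and queue in turn.
 */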
static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	uint32_t i, j, k, reg, index = 0;
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);

	if (!adev->gfx.ip_dump_core)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < reg_count; i++)
		adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_11_0[i]));
	amdgpu_gfx_off_ctrl(adev, true);

	/* dump compute queue registers for all instances */
	if (!adev->gfx.ip_dump_compute_queues)
		return;

	reg_count = ARRAY_SIZE(gc_cp_reg_list_11);
	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_mec; i++) {
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
			for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
				/* ME0 is for GFX so start from 1 for CP */
				soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
				for (reg = 0; reg < reg_count; reg++) {
					adev->gfx.ip_dump_compute_queues[index + reg] =
						RREG32(SOC15_REG_ENTRY_OFFSET(
							gc_cp_reg_list_11[reg]));
				}
				index += reg_count;
			}
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_off_ctrl(adev, true);

	/* dump gfx queue registers for all instances */
	if (!adev->gfx.ip_dump_gfx_queues)
		return;

	index = 0;
	reg_count = ARRAY_SIZE(gc_gfx_queue_reg_list_11);
	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_pipe_per_me; j++) {
			for (k = 0; k < adev->gfx.me.num_queue_per_pipe; k++) {
				soc21_grbm_select(adev, i, j, k, 0);

				for (reg = 0; reg < reg_count; reg++) {
					adev->gfx.ip_dump_gfx_queues[index + reg] =
						RREG32(SOC15_REG_ENTRY_OFFSET(
							gc_gfx_queue_reg_list_11[reg]));
				}
				index += reg_count;
			}
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	amdgpu_gfx_off_ctrl(adev, true);
}

static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
	/* Emit the cleaner shader */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
	amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}

static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.post_soft_reset = gfx_v11_0_post_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
	.dump_ip_state = gfx_v11_ip_dump,
	.print_ip_state = gfx_v11_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* up to 247 dwords if 16 IBs */
		5 + /* update_spm_vmid */
		5 + /* COND_EXEC */
		22 + /* SET_Q_PREEMPTION_MODE */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		4 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		22 + /* SET_Q_PREEMPTION_MODE */
		8 + 8 + /* FENCE x2 */
		8 + /* gfx_v11_0_emit_mem_sync */
		2, /* gfx_v11_0_ring_emit_cleaner_shader */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kgq,
	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		5 + /* update_spm_vmid */
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8 + /* gfx_v11_0_emit_mem_sync */
		2, /* gfx_v11_0_ring_emit_cleaner_shader */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = gfx_v11_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
	.reset = gfx_v11_0_reset_kcq,
	.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_bad_op_irq_funcs = {
	.set = gfx_v11_0_set_bad_op_fault_state,
	.process = gfx_v11_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.bad_op_irq.num_types = 1;
	adev->gfx.bad_op_irq.funcs = &gfx_v11_0_bad_op_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}

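/*
 * Static GDS configuration for gfx11: 4 KB of GDS plus 64 GWS and 16 OA
 * entries. The compute max wave id allows 32 waves per CU; for example,
 * a (hypothetical) 48-CU configuration would yield
 * gds_compute_max_wave_id = 48 * 32 - 1 = 1535.
 */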
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
			    adev->gfx.config.max_sh_per_se *
			    adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}

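/*
 * A WGP is inactive when it is either fused off (CC_GC_SHADER_ARRAY_CONFIG)
 * or disabled by the user (GC_USER_SHADER_ARRAY_CONFIG). OR the two
 * inactive masks together, invert, and clip the result to the number of
 * WGPs actually present (max_cu_per_sh / 2, as each WGP holds two CUs).
 */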
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

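/*
 * Expand the WGP bitmap into a CU bitmap: WGP n maps to CU bits 2n and
 * 2n+1, so each active WGP contributes the two-bit pattern 0b11.
 */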
static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			bitmap = i * adev->gfx.config.max_sh_per_se + j;
			if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
				continue;
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/*
			 * GFX11 could support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device must stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the cu mask for SEs 4 to 7; the layout of the bitmap
			 * is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};