/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "sdma_v6_0_0_pkt_open.h"
#include "nbio_v4_3.h"
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"

MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_4.bin");

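/*
 * Register aperture layout (see sdma_v6_0_get_reg_offset() below): SDMA1's
 * regular registers sit 0x600 past SDMA0's, while the HYP_DEC range
 * (0x5880..0x589a) uses a 0x20 stride per instance.
 */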
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static const struct amdgpu_hwip_reg_entry sdma_reg_list_6_0[] = {
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS2_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS3_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS4_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS5_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS6_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UCODE_CHECKSUM),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_RB_RPTR_FETCH),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK0),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_RD_XNACK1),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK0),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_UTCL1_WR_XNACK1),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_RPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_RB_WPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_OFFSET),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_IB_SUB_REMAIN),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE0_DUMMY_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE_STATUS0),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_RPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_RB_WPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_OFFSET),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_IB_SUB_REMAIN),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE1_DUMMY_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_RPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_RB_WPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_OFFSET),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_IB_SUB_REMAIN),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_QUEUE2_DUMMY_REG),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_INT_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
        SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_CHICKEN_BITS),
};

static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);

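/*
 * sdma_v6_0_get_reg_offset - compute the GC register offset for an SDMA
 * instance, selecting the HYP_DEC aperture base when the internal offset
 * falls inside the HYP_DEC range.
 */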
static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
        u32 base;

        if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
            internal_offset <= SDMA0_HYP_DEC_REG_END) {
                base = adev->reg_offset[GC_HWIP][0][1];
                if (instance != 0)
                        internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
        } else {
                base = adev->reg_offset[GC_HWIP][0][0];
                if (instance == 1)
                        internal_offset += SDMA1_REG_OFFSET;
        }

        return base + internal_offset;
}

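/**
 * sdma_v6_0_ring_init_cond_exec - emit a COND_EXE packet
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address of the condition dword
 *
 * Emit a conditional-execute packet with a dummy execution count and
 * return the ring offset of that count so it can be patched later.
 */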
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring,
                                              uint64_t addr)
{
        unsigned ret;

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, 1);
        /* this is the offset we need to patch later */
        ret = ring->wptr & ring->buf_mask;
        /* insert a dummy count here and patch it later */
        amdgpu_ring_write(ring, 0);

        return ret;
}

/**
 * sdma_v6_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware.
 */
static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u64 *rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = (u64 *)ring->rptr_cpu_addr;

        DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
        return ((*rptr) >> 2);
}

/**
 * sdma_v6_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        u64 wptr = 0;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        }

        return wptr >> 2;
}

/**
 * sdma_v6_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                DRM_DEBUG("Using doorbell -- "
                          "wptr_offs == 0x%08x "
                          "lower_32_bits(ring->wptr) << 2 == 0x%08x "
                          "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
                          ring->wptr_offs,
                          lower_32_bits(ring->wptr << 2),
                          upper_32_bits(ring->wptr << 2));
                /* XXX check if swapping is necessary on BE */
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
                             ring->wptr << 2);
                DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                          ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                DRM_DEBUG("Not using doorbell -- "
                          "regSDMA%i_GFX_RB_WPTR == 0x%08x "
                          "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                          ring->me,
                          lower_32_bits(ring->wptr << 2),
                          ring->me,
                          upper_32_bits(ring->wptr << 2));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                                ring->me, regSDMA0_QUEUE0_RB_WPTR),
                                lower_32_bits(ring->wptr << 2));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
                                ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
                                upper_32_bits(ring->wptr << 2));
        }
}

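/*
 * When the firmware supports burst NOPs, the first NOP carries a COUNT
 * field covering the remaining padding dwords, so the engine can skip
 * the whole run instead of fetching each NOP individually.
 */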
static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                          SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}


/*
 * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 * @flags: unused
 * @job: job to retrieve vmid from
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

        /* An IB packet must end on an 8-DW boundary--the next dword
         * must be on an 8-dword boundary. Our IB packet below is 6
         * dwords long, so add x NOPs such that, in modular arithmetic,
         * wptr + 6 + x = 8k, k >= 0, which in C is,
         * (wptr + 6 + x) % 8 = 0.
         * The expression below is a solution for x.
         */
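        /* e.g. if wptr % 8 == 5, then x = (2 - 5) & 7 = 5 NOPs: (5 + 6 + 5) % 8 == 0 */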
        sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
        amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}

/**
 * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
 *
 * @ring: amdgpu ring pointer
 *
 * Flush the IB by graphics cache rinse.
 */
static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
        uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
                            SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
                            SDMA_GCR_GLI_INV(1);

        /* flush entire cache L0/L1/L2, this can be optimized as performance requires */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
                          SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
                          SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
                          SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}

/**
 * sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
        amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: fence seq number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
                          SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Uncached (UC) */
        /* zero in first two bits */
        BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
                                  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
                /* zero in first two bits */
                BUG_ON(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        if (flags & AMDGPU_FENCE_FLAG_INT) {
                /* generate an interrupt */
                amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
                amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
        }
}

/**
 * sdma_v6_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
        u32 rb_cntl, ib_cntl;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
                ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
        }
}

/**
 * sdma_v6_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */
static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable context switching due to queue empty conditions
 *
 * Enable or disable the async dma engines queue empty context switch.
 */
static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
                }
        }
}

/**
 * sdma_v6_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v6_0_gfx_stop(adev);
                sdma_v6_0_rlc_stop(adev);
        }

        if (amdgpu_sriov_vf(adev))
                return;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl);
        }
}

/**
 * sdma_v6_0_gfx_resume_instance - start/restart a certain sdma engine
 *
 * @adev: amdgpu_device pointer
 * @i: instance
 * @restore: whether to restore the wptr and rptr when restarting
 *
 * Set up the gfx DMA ring buffers and enable them. On restart, the wptr and rptr are restored.
 * Return 0 for success.
 */
static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 doorbell;
        u32 doorbell_offset;
        u32 temp;
        u64 wptr_gpu_addr;

        ring = &adev->sdma.instance[i].ring;
        if (!amdgpu_sriov_vf(adev))
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

        /* Set ring buffer size in dwords */
        rb_bufsz = order_base_2(ring->ring_size / 4);
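        /* ring->ring_size is in bytes; RB_SIZE takes log2 of the size in dwords */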
        rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
                                RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

        /* Initialize the ring buffer's read and write pointers */
        if (restore) {
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
        } else {
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
        }
        /* setup the wptr shadow polling */
        wptr_gpu_addr = ring->wptr_gpu_addr;
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
                        lower_32_bits(wptr_gpu_addr));
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
                        upper_32_bits(wptr_gpu_addr));

        /* set the wb address whether it's enabled or not */
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
                        upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
                        lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);

        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);

        if (!restore)
                ring->wptr = 0;

        /* before programming the wptr to a smaller value, minor_ptr_update needs to be set first */
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);

        if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for the wptr */
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
        }

        doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
        doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));

        if (ring->use_doorbell) {
                doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
                doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
                                                OFFSET, ring->doorbell_index);
        } else {
                doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
        }
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);

        if (i == 0)
                adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                                      ring->doorbell_index,
                                                      adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
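        /* the NBIO doorbell range only needs programming once, via instance 0,
         * and is sized to cover every instance's doorbells
         */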

        if (amdgpu_sriov_vf(adev))
                sdma_v6_0_ring_set_wptr(ring);

        /* set minor_ptr_update to 0 after the wptr is programmed */
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);

        /* Set up sdma hang watchdog */
        temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
        /* 100ms per unit */
        temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
                             max(adev->usec_timeout/100000, 1));
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);

        /* Set RESP_MODE to non-copy addresses */
        temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);

        /* program default cache read and write policy */
        temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
        /* clean read policy and write policy bits */
        temp &= 0xFF0FFF;
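        /* bits 12-13 hold the L2 read policy and bits 14-15 the L2 write
         * policy, matching the shifts used below
         */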
        temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
                 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
                 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);

        if (!amdgpu_sriov_vf(adev)) {
                /* unhalt engine */
                temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
                temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
        }

        /* enable DMA RB */
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

        ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
        /* enable DMA IBs */
        WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);

        if (amdgpu_sriov_vf(adev))
                sdma_v6_0_enable(adev, true);

        return amdgpu_ring_test_helper(ring);
}

/**
 * sdma_v6_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                r = sdma_v6_0_gfx_resume_instance(adev, i, false);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * sdma_v6_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev)
{
        return 0;
}

/**
 * sdma_v6_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v2_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;
        bool use_broadcast;

        /* halt the MEs */
        sdma_v6_0_enable(adev, false);

        if (!adev->sdma.instance[0].fw)
                return -EINVAL;

        /* use broadcast mode to load SDMA microcode by default */
        use_broadcast = true;

        if (use_broadcast) {
                dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n");
                /* load Control Thread microcode */
                hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

                fw_data = (const __le32 *)
                        (adev->sdma.instance[0].fw->data +
                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0);

                for (j = 0; j < fw_size; j++) {
                        if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                msleep(1);
                        WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
                }

                /* load Context Switch microcode */
                fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

                fw_data = (const __le32 *)
                        (adev->sdma.instance[0].fw->data +
                         le32_to_cpu(hdr->ctl_ucode_offset));

                WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000);

                for (j = 0; j < fw_size; j++) {
                        if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                msleep(1);
                        WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
                }
        } else {
                dev_info(adev->dev, "Use legacy method to load SDMA firmware\n");
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        /* load Control Thread microcode */
                        hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
                        amdgpu_ucode_print_sdma_hdr(&hdr->header);
                        fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

                        fw_data = (const __le32 *)
                                (adev->sdma.instance[0].fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0);

                        for (j = 0; j < fw_size; j++) {
                                if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                        msleep(1);
                                WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
                        }

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);

                        /* load Context Switch microcode */
                        fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

                        fw_data = (const __le32 *)
                                (adev->sdma.instance[0].fw->data +
                                 le32_to_cpu(hdr->ctl_ucode_offset));

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000);

                        for (j = 0; j < fw_size; j++) {
                                if (amdgpu_emu_mode == 1 && j % 500 == 0)
                                        msleep(1);
                                WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
                        }

                        WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);
                }
        }

        return 0;
}

static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        u32 tmp;
        int i;

        sdma_v6_0_gfx_stop(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
                tmp |= SDMA0_FREEZE__FREEZE_MASK;
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
                tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
                tmp |= SDMA0_F32_CNTL__HALT_MASK;
                tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK;
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp);

                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);

                udelay(100);

                tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
                WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

                udelay(100);

                WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
                tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

                udelay(100);
        }

        return sdma_v6_0_start(adev);
}

static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_ring *ring;
        int i, r;
        long tmo = msecs_to_jiffies(1000);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ib(ring, tmo);
                if (r)
                        return true;
        }

        return false;
}

/**
 * sdma_v6_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_start(struct amdgpu_device *adev)
{
        int r = 0;

        if (amdgpu_sriov_vf(adev)) {
                sdma_v6_0_enable(adev, false);

                /* set RB registers */
                r = sdma_v6_0_gfx_resume(adev);
                return r;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
                r = sdma_v6_0_load_microcode(adev);
                if (r)
                        return r;

                /* The value of regSDMA_F32_CNTL is invalid the moment after loading fw */
                if (amdgpu_emu_mode == 1)
                        msleep(1000);
        }

        /* unhalt the MEs */
        sdma_v6_0_enable(adev, true);
        /* enable sdma ring preemption */
        sdma_v6_0_ctxempty_int_enable(adev, true);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v6_0_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v6_0_rlc_resume(adev);

        return r;
}

static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
                              struct amdgpu_mqd_prop *prop)
{
        struct v11_sdma_mqd *m = mqd;
        uint64_t wb_gpu_addr;

        m->sdmax_rlcx_rb_cntl =
                order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
                1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
                4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
                1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;

        m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
        m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);

        wb_gpu_addr = prop->wptr_gpu_addr;
        m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
        m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);

        wb_gpu_addr = prop->rptr_gpu_addr;
        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);

        m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0,
                                                regSDMA0_QUEUE0_IB_CNTL));

        m->sdmax_rlcx_doorbell_offset =
                prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

        m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);

        m->sdmax_rlcx_skip_cntl = 0;
        m->sdmax_rlcx_context_status = 0;
        m->sdmax_rlcx_doorbell_log = 0;

        m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
        m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;

        m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
        m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);

        m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
        m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);

        return 0;
}

static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev)
{
        adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd);
        adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init;
}

/**
 * sdma_v6_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        tmp = 0xCAFEDEAD;

        r = amdgpu_device_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_device_wb_free(adev, index);
                return r;
        }

        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                if (amdgpu_emu_mode == 1)
                        msleep(1);
                else
                        udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        amdgpu_device_wb_free(adev, index);

        return r;
}

/*
 * sdma_v6_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        long r;
        u32 tmp = 0;
        u64 gpu_addr;

        tmp = 0xCAFEDEAD;
        memset(&ib, 0, sizeof(ib));

        r = amdgpu_device_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r);
                goto err0;
        }

        ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                drm_err(adev_to_drm(adev), "IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r);
                goto err1;
        }

        tmp = le32_to_cpu(adev->wb.wb[index]);

        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(&ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = bytes - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v6_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                   uint64_t value, unsigned count,
                                   uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw - 1;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib,
                                     uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

/*
 * sdma_v6_0_ring_pad_ib - pad the IB
 * @ib: indirect buffer to fill with padding
 * @ring: amdgpu ring pointer
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;

        pad_count = (-ib->length_dw) & 0x7;
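        /* (-length) & 7 == number of dwords needed to reach the next multiple of 8 */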
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) |
                                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed.
 */
static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/*
 * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);

        /* Update the PD address for this VMID. */
        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));
        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        /* Trigger invalidation. */
        amdgpu_ring_write(ring,
                          SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
                          SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
                          SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
        amdgpu_ring_write(ring, req);
        amdgpu_ring_write(ring, 0xFFFFFFFF);
        amdgpu_ring_write(ring,
                          SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
                          SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
}

static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
                                     uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, val);
}

static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                         uint32_t val, uint32_t mask)
{
        amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                                                   uint32_t reg0, uint32_t reg1,
                                                   uint32_t ref, uint32_t mask)
{
        amdgpu_ring_emit_wreg(ring, reg0, ref);
        /* wait for a cycle to reset vm_inv_eng*_ack */
        amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
        amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {
        .ras_block = {
                .ras_late_init = amdgpu_ras_block_late_init,
        },
};

static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
        case IP_VERSION(6, 0, 3):
                adev->sdma.ras = &sdma_v6_0_3_ras;
                break;
        default:
                break;
        }
}

/* all sizes are in bytes */
#define SDMA6_CSA_SIZE		32
#define SDMA6_CSA_ALIGNMENT	4

static void sdma_v6_0_get_csa_info(struct amdgpu_device *adev,
                                   struct amdgpu_sdma_csa_info *csa_info)
{
        csa_info->size = SDMA6_CSA_SIZE;
        csa_info->alignment = SDMA6_CSA_ALIGNMENT;
}

static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int r;

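        /* amdgpu_user_queue: -1/0 = kernel queues only (user queues disabled),
         * 1 = kernel and user queues, 2 = user queues only
         */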
        switch (amdgpu_user_queue) {
        case -1:
        case 0:
        default:
                adev->sdma.no_user_submission = false;
                adev->sdma.disable_uq = true;
                break;
        case 1:
                adev->sdma.no_user_submission = false;
                adev->sdma.disable_uq = false;
                break;
        case 2:
                adev->sdma.no_user_submission = true;
                adev->sdma.disable_uq = false;
                break;
        }

        r = amdgpu_sdma_init_microcode(adev, 0, true);
        if (r)
                return r;

        sdma_v6_0_set_ring_funcs(adev);
        sdma_v6_0_set_buffer_funcs(adev);
        sdma_v6_0_set_vm_pte_funcs(adev);
        sdma_v6_0_set_irq_funcs(adev);
        sdma_v6_0_set_mqd_funcs(adev);
        sdma_v6_0_set_ras_funcs(adev);
        adev->sdma.get_csa_info = &sdma_v6_0_get_csa_info;

        return 0;
}

static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = ip_block->adev;
        uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
        uint32_t *ptr;

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
                              GFX_11_0_0__SRCID__SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* SDMA user fence event */
        r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
                              GFX_11_0_0__SRCID__SDMA_FENCE,
                              &adev->sdma.fence_irq);
        if (r)
                return r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
                ring->me = i;
                ring->no_user_submission = adev->sdma.no_user_submission;

                DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                          ring->use_doorbell ? "true" : "false");

                ring->doorbell_index =
                        (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset

                ring->vm_hub = AMDGPU_GFXHUB(0);
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
                                     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
                                     AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }

        adev->sdma.supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
        if (!amdgpu_sriov_vf(adev) &&
            !adev->debug_disable_gpu_ring_reset)
                adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

        if (amdgpu_sdma_ras_sw_init(adev)) {
                dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
                return -EINVAL;
        }

        /* Allocate memory for SDMA IP Dump buffer */
        ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
        if (ptr)
                adev->sdma.ip_dump = ptr;
        else
                DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");

        switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
        case IP_VERSION(6, 0, 0):
                if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 0, 1):
                if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 0, 2):
                if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 0, 3):
                if ((adev->sdma.instance[0].fw_version >= 29) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 1, 0):
                if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 1, 1):
                if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 1, 2):
                if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        case IP_VERSION(6, 1, 3):
                if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
                        adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
                break;
        default:
                break;
        }

        return amdgpu_sdma_sysfs_reset_mask_init(adev);
}

static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        amdgpu_sdma_sysfs_reset_mask_fini(adev);
        amdgpu_sdma_destroy_inst_ctx(adev, true);

        kfree(adev->sdma.ip_dump);

        return 0;
}

static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
                                               bool enable)
{
        unsigned int irq_type;
        int i, r;

        if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
                        if (enable)
                                r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
                                                   irq_type);
                        else
                                r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
                                                   irq_type);
                        if (r)
                                return r;
                }
        }

        return 0;
}

static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int r;

        r = sdma_v6_0_start(adev);
        if (r)
                return r;

        return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}

static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

        sdma_v6_0_ctxempty_int_enable(adev, false);
        sdma_v6_0_enable(adev, false);
        sdma_v6_0_set_userq_trap_interrupts(adev, false);

        return 0;
}

static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
        return sdma_v6_0_hw_fini(ip_block);
}

static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
        return sdma_v6_0_hw_init(ip_block);
}

static bool sdma_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        u32 i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));

                if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                        return false;
        }

        return true;
}

static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        unsigned i;
        u32 sdma0, sdma1;
        struct amdgpu_device *adev = ip_block->adev;

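        /* the two status words are ANDed below, so the IDLE bit survives
         * only when both engine instances report idle
         */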
        for (i = 0; i < adev->usec_timeout; i++) {
                sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
                sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG));

                if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
1533
static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	sdma_gfx_preempt =
		sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT);

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

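/*
 * Queue reset path used when a job on this ring times out: stop the
 * ring, have the MES firmware reset the legacy queue, reprogram and
 * restart this instance's ring buffer, then let the common reset
 * helper finish fence handling and resume submission.
 */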
static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
				 unsigned int vmid,
				 struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (ring->me >= adev->sdma.num_instances) {
		dev_err(adev->dev, "sdma instance not found\n");
		return -EINVAL;
	}

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true, 0);
	if (r)
		return r;

	r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
	if (r)
		return r;

	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

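/*
 * Toggle TRAP_ENABLE in SDMA0_CNTL for the given instance. Skipped on
 * SR-IOV VFs, which don't program this register directly.
 */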
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;
	u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);

	if (!amdgpu_sriov_vf(adev)) {
		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

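/*
 * Trap interrupt handler: the IV ring_id encodes the queue in bits 3:0
 * and the SDMA instance in bits 7:4; kernel gfx queue 0 interrupts are
 * forwarded to fence processing for that instance's ring.
 */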
static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	int instances, queue;

	DRM_DEBUG("IH: SDMA trap\n");

	queue = entry->ring_id & 0xf;
	instances = (entry->ring_id & 0xf0) >> 4;
	if (instances > 1) {
		DRM_ERROR("IH: bad ring_id, unknown SDMA instance\n");
		return -EINVAL;
	}

	switch (entry->client_id) {
	case SOC21_IH_CLIENTID_GFX:
		switch (queue) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[instances].ring);
			break;
		default:
			break;
		}
		break;
	}
	return 0;
}

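/*
 * Fence interrupt handler for user-mode queues: the doorbell offset
 * carried in src_data[0] is used as the key into adev->userq_xa to
 * look up the owning user queue fence driver, whose fences are then
 * processed.
 */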
static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	u32 doorbell_offset = entry->src_data[0];

	if (adev->enable_mes && doorbell_offset) {
		struct amdgpu_userq_fence_driver *fence_drv = NULL;
		struct xarray *xa = &adev->userq_xa;
		unsigned long flags;

		doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

		xa_lock_irqsave(xa, flags);
		fence_drv = xa_load(xa, doorbell_offset);
		if (fence_drv)
			amdgpu_userq_fence_driver_process(fence_drv);
		xa_unlock_irqrestore(xa, flags);
	}

	return 0;
}

static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

static int sdma_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v6_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
}

static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
	uint32_t instance_offset;

	if (!adev->sdma.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		instance_offset = i * reg_count;
		drm_printf(p, "\nInstance:%d\n", i);

		for (j = 0; j < reg_count; j++)
			drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_6_0[j].reg_name,
				   adev->sdma.ip_dump[instance_offset + j]);
	}
}

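/*
 * Snapshot the register list for every instance into the preallocated
 * ip_dump buffer. GFXOFF is disabled around the reads so the register
 * accesses don't hit a powered-down GFX block.
 */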
static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t instance_offset;
	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);

	if (!adev->sdma.ip_dump)
		return;

	amdgpu_gfx_off_ctrl(adev, false);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		instance_offset = i * reg_count;
		for (j = 0; j < reg_count; j++)
			adev->sdma.ip_dump[instance_offset + j] =
				RREG32(sdma_v6_0_get_reg_offset(adev, i,
				       sdma_reg_list_6_0[j].reg_offset));
	}
	amdgpu_gfx_off_ctrl(adev, true);
}

const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
	.name = "sdma_v6_0",
	.early_init = sdma_v6_0_early_init,
	.sw_init = sdma_v6_0_sw_init,
	.sw_fini = sdma_v6_0_sw_fini,
	.hw_init = sdma_v6_0_hw_init,
	.hw_fini = sdma_v6_0_hw_fini,
	.suspend = sdma_v6_0_suspend,
	.resume = sdma_v6_0_resume,
	.is_idle = sdma_v6_0_is_idle,
	.wait_for_idle = sdma_v6_0_wait_for_idle,
	.soft_reset = sdma_v6_0_soft_reset,
	.check_soft_reset = sdma_v6_0_check_soft_reset,
	.set_clockgating_state = sdma_v6_0_set_clockgating_state,
	.set_powergating_state = sdma_v6_0_set_powergating_state,
	.get_clockgating_state = sdma_v6_0_get_clockgating_state,
	.dump_ip_state = sdma_v6_0_dump_ip_state,
	.print_ip_state = sdma_v6_0_print_ip_state,
};

static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = sdma_v6_0_ring_get_rptr,
	.get_wptr = sdma_v6_0_ring_get_wptr,
	.set_wptr = sdma_v6_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v6_0_ring_init_cond_exec */
		6 + /* sdma_v6_0_ring_emit_hdp_flush */
		6 + /* sdma_v6_0_ring_emit_pipeline_sync */
		/* sdma_v6_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */
	.emit_ib = sdma_v6_0_ring_emit_ib,
	.emit_mem_sync = sdma_v6_0_ring_emit_mem_sync,
	.emit_fence = sdma_v6_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v6_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush,
	.test_ring = sdma_v6_0_ring_test_ring,
	.test_ib = sdma_v6_0_ring_test_ib,
	.insert_nop = sdma_v6_0_ring_insert_nop,
	.pad_ib = sdma_v6_0_ring_pad_ib,
	.emit_wreg = sdma_v6_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v6_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
	.preempt_ib = sdma_v6_0_ring_preempt_ib,
	.reset = sdma_v6_0_reset_queue,
};

static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
	.set = sdma_v6_0_set_trap_irq_state,
	.process = sdma_v6_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
	.process = sdma_v6_0_process_fence_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
	.process = sdma_v6_0_process_illegal_inst_irq,
};

static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	/* one trap interrupt type per SDMA instance */
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
	adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v6_0_emit_copy_buffer - copy buffer using the SDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to transfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
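
/*
 * The resulting COPY_LINEAR packet is seven dwords, matching
 * sdma_v6_0_buffer_funcs.copy_num_dw below:
 *   dw0:   header (OP_COPY, SUBOP_COPY_LINEAR, optional TMZ bit)
 *   dw1:   byte_count - 1
 *   dw2:   src/dst endian-swap parameters (0 = no swap)
 *   dw3-4: source address, low/high 32 bits
 *   dw5-6: destination address, low/high 32 bits
 */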

/**
 * sdma_v6_0_emit_fill_buffer - fill buffer using the SDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to transfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}
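
/*
 * The resulting CONSTANT_FILL packet is five dwords, matching
 * sdma_v6_0_buffer_funcs.fill_num_dw below:
 *   dw0:   header (OP_CONST_FILL)
 *   dw1-2: destination address, low/high 32 bits
 *   dw3:   32-bit fill pattern
 *   dw4:   byte_count - 1
 */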

static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v6_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v6_0_emit_fill_buffer,
};

static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v6_0_vm_copy_pte,
	.write_pte = sdma_v6_0_vm_write_pte,
	.set_pte_pde = sdma_v6_0_vm_set_pte_pde,
};

static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned int i;

	adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v6_0_ip_funcs,
};