/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"

#define AMDGPU_CSA_VPE_SIZE	64
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET	(4096 * 3)

static void vpe_set_ring_funcs(struct amdgpu_device *adev);

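/*
 * Hand the VPE firmware-load request to the PSP: the 8-byte payload at
 * cmdbuf_gpu_addr (presumably staged by the IP-specific load path) tells
 * the PSP what to copy into VPE SRAM.
 */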
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = AMDGPU_UCODE_ID_VPE,
		.mc_addr = adev->vpe.cmdbuf_gpu_addr,
		.ucode_size = 8,
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

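/*
 * Fetch the VPE firmware image (amdgpu/<ip>.bin), cache its version
 * information and, for PSP-based loading, register the CTX and CTL
 * sections with the front-door firmware list so the PSP can load them.
 */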
int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	char fw_prefix[32], fw_name[64];
	int ret;

	amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", fw_prefix);

	ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
	if (ret)
		goto out;

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
	adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
	adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		struct amdgpu_firmware_info *info;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
		info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
		info->fw = adev->vpe.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
	}

	return 0;
out:
	dev_err(adev->dev, "failed to initialize vpe microcode\n");
	/* pair amdgpu_ucode_request() with amdgpu_ucode_release() */
	amdgpu_ucode_release(&adev->vpe.fw);
	return ret;
}

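/*
 * Create the single VPE kernel ring. Doorbell indices are assigned in
 * 64-bit qword units, so shift left by one to get the dword-based index
 * used by the 64-bit doorbell write.
 */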
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	struct amdgpu_ring *ring = &vpe->ring;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
	snprintf(ring->name, 4, "vpe");

	return amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
{
	amdgpu_ring_fini(&vpe->ring);

	return 0;
}

static int vpe_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
		vpe_v6_1_set_funcs(vpe);
		break;
	default:
		return -EINVAL;
	}

	vpe_set_ring_funcs(adev);
	vpe_set_regs(vpe);

	return 0;
}

static int vpe_common_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int r;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vpe.cmdbuf_obj,
				    &adev->vpe.cmdbuf_gpu_addr,
				    (void **)&adev->vpe.cmdbuf_cpu_addr);
	if (r) {
		dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
		return r;
	}

	return 0;
}

static int vpe_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_common_init(vpe);
	if (ret)
		goto out;

	ret = vpe_irq_init(vpe);
	if (ret)
		goto out;

	ret = vpe_ring_init(vpe);
	if (ret)
		goto out;

	ret = vpe_init_microcode(vpe);
	if (ret)
		goto out;
out:
	return ret;
}

static int vpe_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	/* pair amdgpu_ucode_request() with amdgpu_ucode_release() */
	amdgpu_ucode_release(&vpe->fw);

	vpe_ring_fini(vpe);

	amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
			      &adev->vpe.cmdbuf_gpu_addr,
			      (void **)&adev->vpe.cmdbuf_cpu_addr);

	return 0;
}

static int vpe_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;
	int ret;

	ret = vpe_load_microcode(vpe);
	if (ret)
		return ret;

	ret = vpe_ring_start(vpe);
	if (ret)
		return ret;

	return 0;
}

static int vpe_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vpe *vpe = &adev->vpe;

	vpe_ring_stop(vpe);

	return 0;
}

static int vpe_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vpe_hw_fini(adev);
}

static int vpe_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vpe_hw_init(adev);
}

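/*
 * Pad the ring with NOPs. A single VPE NOP packet can cover several
 * dwords: the header carries a count of the zero dwords that follow,
 * so 'count' dwords are consumed with one packet.
 */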
static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	/* a zero count would underflow the unsigned loop bound below */
	if (!count)
		return;

	amdgpu_ring_write(ring, ring->funcs->nop |
				VPE_CMD_NOP_HEADER_COUNT(count - 1));

	for (i = 0; i < count - 1; i++)
		amdgpu_ring_write(ring, 0);
}

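/*
 * Pad an IB up to the next 8 DW boundary with one NOP packet.
 * (-length_dw) & 0x7 is the number of dwords still missing, e.g. a
 * 13 DW IB needs 3 more: one NOP header with COUNT(2) plus two zero
 * dwords brings it to 16.
 */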
static void vpe_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	uint32_t pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	/* already aligned: a zero pad_count would underflow below */
	if (!pad_count)
		return;

	ib->ptr[ib->length_dw++] = ring->funcs->nop |
				   VPE_CMD_NOP_HEADER_COUNT(pad_count - 1);

	for (i = 0; i < pad_count - 1; i++)
		ib->ptr[ib->length_dw++] = 0;
}

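/*
 * Return the GPU VA of the context save area slot used to hold
 * mid-command-buffer preemption state. No CSA is used for kernel
 * submissions (vmid 0), under SR-IOV, or when MCBP is disabled.
 */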
static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t index = 0;
	uint64_t csa_mc_addr;

	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
		return 0;

	csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
		      index * AMDGPU_CSA_VPE_SIZE;

	return csa_mc_addr;
}

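/*
 * Emit an INDIRECT packet that points the engine at an IB. The packet
 * is 6 DWs (header, base lo/hi, length, CSA lo/hi) and must end on an
 * 8 DW boundary, hence the (2 - wptr) & 7 dwords of NOP padding first.
 */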
static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);

	/* An IB packet must end on an 8 DW boundary: pad until
	 * wptr == 2 (mod 8) so the 6 DW packet below fills the window.
	 */
	vpe_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
				VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));

	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}

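/*
 * Write a fence value to memory. One FENCE packet stores a single
 * dword; for 64-bit fences the loop emits a second packet for the
 * upper half at addr + 4. An optional TRAP packet raises the
 * completion interrupt.
 */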
static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
				uint64_t seq, unsigned int flags)
{
	int i = 0;

	do {
		/* write the fence */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
		/* the fence address must be dword aligned */
		WARN_ON_ONCE(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
		addr += 4;
	} while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

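/*
 * Block the engine until all previously submitted work has signalled:
 * poll the ring's fence location until it equals the last emitted
 * sequence number.
 */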
static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
}

static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
				VPE_POLL_REGMEM_SUBOP_REGMEM) |
				VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
				VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
}

static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
				   uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

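/*
 * Start a conditional-execution region: emit the condition address and
 * a reference value of 1, then a placeholder count (0x55aa55aa) that
 * vpe_ring_patch_cond_exec() later overwrites with the number of
 * dwords the region actually covers.
 */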
static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned int ret;

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);
	/* this is the offset we need to patch later */
	ret = ring->wptr & ring->buf_mask;
	/* insert a dummy here and patch it later */
	amdgpu_ring_write(ring, 0x55aa55aa);

	return ret;
}

static void vpe_ring_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
{
	unsigned int cur;

	WARN_ON_ONCE(offset > ring->buf_mask);
	WARN_ON_ONCE(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}

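/*
 * Preempt the work currently on the ring:
 * 1. flip the cond_exec flag so pending packets turn into no-ops,
 * 2. emit a trailing fence and kick the queue0 preempt register,
 * 3. busy-wait for the trailing fence to signal, then deassert
 *    preemption and re-enable cond_exec.
 */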
static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t preempt_reg = vpe->regs.queue0_preempt;
	int i, r = 0;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);

	return r;
}

static int vpe_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int vpe_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t rptr;

	if (ring->use_doorbell) {
		rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
		dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
	} else {
		rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
		rptr = rptr << 32;
		rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
		dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
	}

	return (rptr >> 2);
}

static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint64_t wptr;

	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
		dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
		wptr = wptr << 32;
		wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
		dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
	}

	return (wptr >> 2);
}

static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vpe *vpe = &adev->vpe;

	if (ring->use_doorbell) {
		dev_dbg(adev->dev,
			"Using doorbell, wptr_offs == 0x%08x, lower_32_bits(ring->wptr << 2) == 0x%08x, upper_32_bits(ring->wptr << 2) == 0x%08x\n",
			ring->wptr_offs,
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		dev_dbg(adev->dev,
			"Not using doorbell, regVPEC_QUEUE0_RB_WPTR == 0x%08x, regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
			lower_32_bits(ring->wptr << 2),
			upper_32_bits(ring->wptr << 2));
		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo),
		       lower_32_bits(ring->wptr << 2));
		WREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi),
		       upper_32_bits(ring->wptr << 2));
	}
}

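/*
 * Smoke-test the ring from the kernel: emit a FENCE packet that writes
 * a magic value to a writeback slot and poll (up to usec_timeout) for
 * it to land.
 */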
static int vpe_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	uint32_t index, i;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ring_alloc(ring, 4);
	if (ret) {
		dev_err(adev->dev, "VPE: failed to lock ring %d (%d).\n", ring->idx, ret);
		goto out;
	}

	amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
	amdgpu_ring_write(ring, lower_32_bits(wb_addr));
	amdgpu_ring_write(ring, upper_32_bits(wb_addr));
	amdgpu_ring_write(ring, test_pattern);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
			goto out;
		udelay(1);
	}

	ret = -ETIMEDOUT;
out:
	amdgpu_device_wb_free(adev, index);

	return ret;
}

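/*
 * Same idea as the ring test, but through an indirect buffer: the IB
 * carries the FENCE write (padded with NOPs to 8 DWs), is submitted
 * with amdgpu_ib_schedule(), and the returned fence is awaited before
 * checking the writeback value.
 */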
static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	const uint32_t test_pattern = 0xdeadbeef;
	struct amdgpu_ib ib = {};
	struct dma_fence *f = NULL;
	uint32_t index;
	uint64_t wb_addr;
	int ret;

	ret = amdgpu_device_wb_get(adev, &index);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
		return ret;
	}

	adev->wb.wb[index] = 0;
	wb_addr = adev->wb.gpu_addr + (index * 4);

	ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (ret)
		goto err0;

	ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
	ib.ptr[1] = lower_32_bits(wb_addr);
	ib.ptr[2] = upper_32_bits(wb_addr);
	ib.ptr[3] = test_pattern;
	ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
	ib.length_dw = 8;

	ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (ret)
		goto err1;

	ret = dma_fence_wait_timeout(f, false, timeout);
	if (ret <= 0) {
		ret = ret ? : -ETIMEDOUT;
		goto err1;
	}

	ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);

	return ret;
}

static const struct amdgpu_ring_funcs vpe_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VPE,
	.align_mask = 0xf,
	.nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
	.support_64bit_ptrs = true,
	.get_rptr = vpe_ring_get_rptr,
	.get_wptr = vpe_ring_get_wptr,
	.set_wptr = vpe_ring_set_wptr,
	.emit_frame_size =
		5 + /* vpe_ring_init_cond_exec */
		6 + /* vpe_ring_emit_pipeline_sync */
		10 + 10 + 10 + /* vpe_ring_emit_fence */
		/* vpe_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
	.emit_ib_size = 7 + 6, /* vpe_ring_emit_ib: 7 DW max NOP padding + 6 DW INDIRECT */
	.emit_ib = vpe_ring_emit_ib,
	.emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
	.emit_fence = vpe_ring_emit_fence,
	.emit_vm_flush = vpe_ring_emit_vm_flush,
	.emit_wreg = vpe_ring_emit_wreg,
	.emit_reg_wait = vpe_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.insert_nop = vpe_ring_insert_nop,
	.pad_ib = vpe_ring_pad_ib,
	.test_ring = vpe_ring_test_ring,
	.test_ib = vpe_ring_test_ib,
	.init_cond_exec = vpe_ring_init_cond_exec,
	.patch_cond_exec = vpe_ring_patch_cond_exec,
	.preempt_ib = vpe_ring_preempt_ib,
};

static void vpe_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vpe.ring.funcs = &vpe_ring_funcs;
}

const struct amd_ip_funcs vpe_ip_funcs = {
	.name = "vpe_v6_1",
	.early_init = vpe_early_init,
	.late_init = NULL,
	.sw_init = vpe_sw_init,
	.sw_fini = vpe_sw_fini,
	.hw_init = vpe_hw_init,
	.hw_fini = vpe_hw_fini,
	.suspend = vpe_suspend,
	.resume = vpe_resume,
	.soft_reset = NULL,
	.set_clockgating_state = vpe_set_clockgating_state,
	.set_powergating_state = vpe_set_powergating_state,
};

const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VPE,
	.major = 6,
	.minor = 1,
	.rev = 0,
	.funcs = &vpe_ip_funcs,
};