xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO in bytes
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

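	/*
	 * The packed ucode_version dword holds the firmware version:
	 * bits 31:20 are the major version, bits 19:8 the minor version
	 * and bits 7:0 the binary ID.  It is repacked below into
	 * adev->vce.fw_version as (major << 24 | minor << 16 | binary << 8)
	 * so checks like "(fw_version >> 24) >= 52" can test the major
	 * version directly.
	 */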
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

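	/*
	 * The VCPU BO stays pinned in VRAM until amdgpu_vce_sw_fini();
	 * CPU access is requested because amdgpu_vce_resume() kmaps the
	 * BO and copies the firmware image into it.
	 */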
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

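	/*
	 * Set up a scheduler entity on the first VCE ring.  It is used
	 * for kernel internal submissions, e.g. the destroy messages sent
	 * by amdgpu_vce_free_handles() when a file is closed with
	 * sessions still open.
	 */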
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - check that VCE can be suspended
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and return an error if any encode session is
 * still open, since suspending active sessions is not supported yet.
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

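	/* refuse to suspend while any encode session is still open */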
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - re-upload VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 * Map the firmware BO and copy the firmware image back into it,
 * e.g. after a suspend/resume cycle.
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

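	/*
	 * Skip the common firmware header; only the raw microcode that
	 * starts at ucode_array_offset_bytes is copied into the VCPU BO.
	 */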
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
		(adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

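	/*
	 * Only power VCE down when both rings have no fences left to
	 * signal; otherwise re-arm the timer and check again later.
	 */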
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
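	/*
	 * cancel_delayed_work_sync() returns false when the idle work was
	 * not pending, which means VCE may already have been powered down
	 * and the clocks have to be brought back up below.
	 */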
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

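	/*
	 * Use a dummy feedback buffer address 1KB into the IB itself; the
	 * message built below stays well short of that offset, so the HW
	 * test gets a writable scratch location without an extra BO.
	 */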
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

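	/*
	 * Firmware with major version 52 or newer uses a larger create
	 * command (0x40 bytes instead of 0x30); the extra zero dwords are
	 * appended further down for those versions.
	 */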
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
	job->fence = f;
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly on the ring instead of going
 *          through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

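	/*
	 * Direct submission bypasses the GPU scheduler and is used by the
	 * ring/IB tests; the non-direct path goes through adev->vce.entity,
	 * e.g. when cleaning up handles that userspace left open.
	 */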
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
		job->fence = f;
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bitstream/feedback buffer index, 0xffffffff means index 0
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

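	/*
	 * The command stream carries GPU virtual addresses.  Look up the
	 * backing BO for the address, check that the referenced range fits
	 * inside the mapping and rewrite the lo/hi dwords with the address
	 * based on the BO's current GPU offset.
	 */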
	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

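	/*
	 * Walk the IB packet by packet: every packet starts with a length
	 * dword followed by a command dword.  A session command must come
	 * first, buffer addresses are patched via amdgpu_vce_cs_reloc()
	 * and no command may follow a destroy.
	 */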
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vm_id: virtual memory id, not used by VCE
 * @ctx_switch: context switch flag, not used by VCE
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address where the fence value is written
 * @seq: sequence number to write
 * @flags: fence flags, AMDGPU_FENCE_FLAG_64BIT is not supported
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

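	/*
	 * VCE_CMD_FENCE writes the sequence number to the given address;
	 * the VCE_CMD_TRAP that follows raises the interrupt used to
	 * signal fence completion.
	 */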
	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

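	/*
	 * Submit a single VCE_CMD_END and wait for the read pointer to
	 * move past it; if the rptr never advances the VCE block is not
	 * consuming the ring.
	 */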
	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}
871