xref: /linux/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c (revision fcb3ad4366b9c810cbb9da34c076a9a52d8aa1e0)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Wave5 series multi-standard codec IP - helper functions
4  *
5  * Copyright (C) 2021-2023 CHIPS&MEDIA INC
6  */
7 
8 #include <linux/bug.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/delay.h>
11 #include "wave5-vpuapi.h"
12 #include "wave5-regdefine.h"
13 #include "wave5.h"
14 
15 #define DECODE_ALL_TEMPORAL_LAYERS 0
16 #define DECODE_ALL_SPATIAL_LAYERS 0
17 
18 static int wave5_initialize_vpu(struct device *dev, u8 *code, size_t size)
19 {
20 	int ret;
21 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
22 
23 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
24 	if (ret)
25 		return ret;
26 
27 	if (wave5_vpu_is_init(vpu_dev)) {
28 		wave5_vpu_re_init(dev, (void *)code, size);
29 		ret = -EBUSY;
30 		goto err_out;
31 	}
32 
33 	ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
34 	if (ret)
35 		goto err_out;
36 
37 	ret = wave5_vpu_init(dev, (void *)code, size);
38 
39 err_out:
40 	mutex_unlock(&vpu_dev->hw_lock);
41 	return ret;
42 }
43 
44 int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size)
45 {
46 	if (!bitcode || size == 0)
47 		return -EINVAL;
48 
49 	return wave5_initialize_vpu(dev, bitcode, size);
50 }
51 
52 int wave5_vpu_flush_instance(struct vpu_instance *inst)
53 {
54 	int ret = 0;
55 	int retry = 0;
56 
57 	ret = mutex_lock_interruptible(&inst->dev->hw_lock);
58 	if (ret)
59 		return ret;
60 	do {
61 		/*
62 		 * Repeat the FLUSH command until the firmware reports that the
63 		 * VPU isn't running anymore
64 		 */
65 		ret = wave5_vpu_hw_flush_instance(inst);
66 		if (ret < 0 && ret != -EBUSY) {
67 			dev_warn(inst->dev->dev, "Flush of %s instance with id: %d fail: %d\n",
68 				 inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id,
69 				 ret);
70 			mutex_unlock(&inst->dev->hw_lock);
71 			return ret;
72 		}
73 		if (ret == -EBUSY && retry++ >= MAX_FIRMWARE_CALL_RETRY) {
74 			dev_warn(inst->dev->dev, "Flush of %s instance with id: %d timed out!\n",
75 				 inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
76 			mutex_unlock(&inst->dev->hw_lock);
77 			return -ETIMEDOUT;
78 		}
79 	} while (ret != 0);
80 	mutex_unlock(&inst->dev->hw_lock);
81 
82 	return ret;
83 }
84 
85 int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id)
86 {
87 	int ret;
88 	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
89 
90 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
91 	if (ret)
92 		return ret;
93 
94 	if (!wave5_vpu_is_init(vpu_dev)) {
95 		ret = -EINVAL;
96 		goto err_out;
97 	}
98 
99 	if (product_id)
100 		*product_id = vpu_dev->product;
101 	ret = wave5_vpu_get_version(vpu_dev, revision);
102 
103 err_out:
104 	mutex_unlock(&vpu_dev->hw_lock);
105 	return ret;
106 }
107 
108 static int wave5_check_dec_open_param(struct vpu_instance *inst, struct dec_open_param *param)
109 {
110 	if (inst->id >= MAX_NUM_INSTANCE) {
111 		dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
112 			inst->id, MAX_NUM_INSTANCE);
113 		return -EOPNOTSUPP;
114 	}
115 
116 	if (param->bitstream_buffer % 8) {
117 		dev_err(inst->dev->dev,
118 			"Bitstream buffer must be aligned to a multiple of 8\n");
119 		return -EINVAL;
120 	}
121 
122 	if (param->bitstream_buffer_size % 1024 ||
123 	    param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE) {
124 		dev_err(inst->dev->dev,
125 			"Bitstream buffer size must be aligned to a multiple of 1024 and have a minimum size of %d\n",
126 			MIN_BITSTREAM_BUFFER_SIZE);
127 		return -EINVAL;
128 	}
129 
130 	return 0;
131 }
132 
133 int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param)
134 {
135 	struct dec_info *p_dec_info;
136 	int ret;
137 	struct vpu_device *vpu_dev = inst->dev;
138 	dma_addr_t buffer_addr;
139 	size_t buffer_size;
140 
141 	ret = wave5_check_dec_open_param(inst, open_param);
142 	if (ret)
143 		return ret;
144 
145 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
146 	if (ret)
147 		return ret;
148 
149 	if (!wave5_vpu_is_init(vpu_dev)) {
150 		mutex_unlock(&vpu_dev->hw_lock);
151 		return -ENODEV;
152 	}
153 
154 	p_dec_info = &inst->codec_info->dec_info;
155 	memcpy(&p_dec_info->open_param, open_param, sizeof(struct dec_open_param));
156 
157 	buffer_addr = open_param->bitstream_buffer;
158 	buffer_size = open_param->bitstream_buffer_size;
159 	p_dec_info->stream_wr_ptr = buffer_addr;
160 	p_dec_info->stream_rd_ptr = buffer_addr;
161 	p_dec_info->stream_buf_start_addr = buffer_addr;
162 	p_dec_info->stream_buf_size = buffer_size;
163 	p_dec_info->stream_buf_end_addr = buffer_addr + buffer_size;
164 	p_dec_info->reorder_enable = TRUE;
165 	p_dec_info->temp_id_select_mode = TEMPORAL_ID_MODE_ABSOLUTE;
166 	p_dec_info->target_temp_id = DECODE_ALL_TEMPORAL_LAYERS;
167 	p_dec_info->target_spatial_id = DECODE_ALL_SPATIAL_LAYERS;
168 
169 	ret = wave5_vpu_build_up_dec_param(inst, open_param);
170 	mutex_unlock(&vpu_dev->hw_lock);
171 
172 	return ret;
173 }
174 
175 static int reset_auxiliary_buffers(struct vpu_instance *inst, unsigned int index)
176 {
177 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
178 
179 	if (index >= MAX_REG_FRAME)
180 		return 1;
181 
182 	if (p_dec_info->vb_mv[index].size == 0 && p_dec_info->vb_fbc_y_tbl[index].size == 0 &&
183 	    p_dec_info->vb_fbc_c_tbl[index].size == 0)
184 		return 1;
185 
186 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[index]);
187 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[index]);
188 	wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[index]);
189 
190 	return 0;
191 }
192 
/*
 * Tear down a decoder instance: ask the firmware to finish the sequence,
 * retrying while it reports the VPU still running, then free the work,
 * auxiliary (MV/FBC table) and task DMA buffers.
 *
 * The runtime-PM reference taken at entry is dropped again on every
 * return path.
 */
int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
{
	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
	int ret;
	int retry = 0;
	struct vpu_device *vpu_dev = inst->dev;
	int i;
	int inst_count = 0;
	struct vpu_instance *inst_elm;

	*fail_res = 0;
	if (!inst->codec_info)
		return -EINVAL;

	pm_runtime_resume_and_get(inst->dev->dev);

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret) {
		/* Balance the runtime-PM get taken above. */
		pm_runtime_put_sync(inst->dev->dev);
		return ret;
	}

	/* Repeat DEC_FINISH_SEQ until the firmware is no longer running. */
	do {
		ret = wave5_vpu_dec_finish_seq(inst, fail_res);
		if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
			dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
			goto unlock_and_return;
		}

		if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
		    retry++ >= MAX_FIRMWARE_CALL_RETRY) {
			ret = -ETIMEDOUT;
			goto unlock_and_return;
		}
	} while (ret != 0);

	dev_dbg(inst->dev->dev, "%s: dec_finish_seq complete\n", __func__);

	wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);

	/*
	 * reset_auxiliary_buffers() returns non-zero at the first empty
	 * slot (or out-of-range index); that is normal completion of the
	 * sweep, not an error, so ret is cleared before breaking out.
	 */
	for (i = 0 ; i < MAX_REG_FRAME; i++) {
		ret = reset_auxiliary_buffers(inst, i);
		if (ret) {
			ret = 0;
			break;
		}
	}

	wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_task);

	/* If this is the only remaining instance, stop autosuspend use. */
	list_for_each_entry(inst_elm, &vpu_dev->instances, list)
		inst_count++;
	if (inst_count == 1)
		pm_runtime_dont_use_autosuspend(vpu_dev->dev);

unlock_and_return:
	mutex_unlock(&vpu_dev->hw_lock);
	pm_runtime_put_sync(inst->dev->dev);
	return ret;
}
253 
254 int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst)
255 {
256 	int ret;
257 	struct vpu_device *vpu_dev = inst->dev;
258 
259 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
260 	if (ret)
261 		return ret;
262 
263 	ret = wave5_vpu_dec_init_seq(inst);
264 
265 	mutex_unlock(&vpu_dev->hw_lock);
266 
267 	return ret;
268 }
269 
270 int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info)
271 {
272 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
273 	int ret;
274 	struct vpu_device *vpu_dev = inst->dev;
275 
276 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
277 	if (ret)
278 		return ret;
279 
280 	ret = wave5_vpu_dec_get_seq_info(inst, info);
281 	if (!ret)
282 		p_dec_info->initial_info_obtained = true;
283 
284 	info->rd_ptr = wave5_dec_get_rd_ptr(inst);
285 	info->wr_ptr = p_dec_info->stream_wr_ptr;
286 
287 	p_dec_info->initial_info = *info;
288 
289 	mutex_unlock(&vpu_dev->hw_lock);
290 
291 	return ret;
292 }
293 
294 int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
295 					   int num_of_display_fbs, int stride, int height)
296 {
297 	struct dec_info *p_dec_info;
298 	int ret;
299 	struct vpu_device *vpu_dev = inst->dev;
300 	struct frame_buffer *fb;
301 
302 	if (num_of_decoding_fbs >= WAVE5_MAX_FBS || num_of_display_fbs >= WAVE5_MAX_FBS)
303 		return -EINVAL;
304 
305 	p_dec_info = &inst->codec_info->dec_info;
306 	p_dec_info->num_of_decoding_fbs = num_of_decoding_fbs;
307 	p_dec_info->num_of_display_fbs = num_of_display_fbs;
308 	p_dec_info->stride = stride;
309 
310 	if (!p_dec_info->initial_info_obtained)
311 		return -EINVAL;
312 
313 	if (stride < p_dec_info->initial_info.pic_width || (stride % 8 != 0) ||
314 	    height < p_dec_info->initial_info.pic_height)
315 		return -EINVAL;
316 
317 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
318 	if (ret)
319 		return ret;
320 
321 	fb = inst->frame_buf;
322 	ret = wave5_vpu_dec_register_framebuffer(inst, &fb[p_dec_info->num_of_decoding_fbs],
323 						 LINEAR_FRAME_MAP, p_dec_info->num_of_display_fbs);
324 	if (ret)
325 		goto err_out;
326 
327 	ret = wave5_vpu_dec_register_framebuffer(inst, &fb[0], COMPRESSED_FRAME_MAP,
328 						 p_dec_info->num_of_decoding_fbs);
329 
330 err_out:
331 	mutex_unlock(&vpu_dev->hw_lock);
332 
333 	return ret;
334 }
335 
336 int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
337 				       dma_addr_t *pwr_ptr, size_t *size)
338 {
339 	struct dec_info *p_dec_info;
340 	dma_addr_t rd_ptr;
341 	dma_addr_t wr_ptr;
342 	int room;
343 	struct vpu_device *vpu_dev = inst->dev;
344 	int ret;
345 
346 	p_dec_info = &inst->codec_info->dec_info;
347 
348 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
349 	if (ret)
350 		return ret;
351 	rd_ptr = wave5_dec_get_rd_ptr(inst);
352 	mutex_unlock(&vpu_dev->hw_lock);
353 
354 	wr_ptr = p_dec_info->stream_wr_ptr;
355 
356 	if (wr_ptr < rd_ptr)
357 		room = rd_ptr - wr_ptr;
358 	else
359 		room = (p_dec_info->stream_buf_end_addr - wr_ptr) +
360 			(rd_ptr - p_dec_info->stream_buf_start_addr);
361 	room--;
362 
363 	if (prd_ptr)
364 		*prd_ptr = rd_ptr;
365 	if (pwr_ptr)
366 		*pwr_ptr = wr_ptr;
367 	if (size)
368 		*size = room;
369 
370 	return 0;
371 }
372 
/*
 * Advance the write pointer of the bitstream ring buffer by @size bytes,
 * wrapping at the buffer end. A @size of 0 does not move the pointer but
 * signals end-of-stream to the firmware via the bitstream flag.
 */
int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size)
{
	struct dec_info *p_dec_info;
	dma_addr_t wr_ptr;
	dma_addr_t rd_ptr;
	int ret;
	struct vpu_device *vpu_dev = inst->dev;

	if (!inst->codec_info)
		return -EINVAL;

	p_dec_info = &inst->codec_info->dec_info;
	wr_ptr = p_dec_info->stream_wr_ptr;
	rd_ptr = p_dec_info->stream_rd_ptr;

	if (size > 0) {
		/* Reject updates that would overtake the read pointer. */
		if (wr_ptr < rd_ptr && rd_ptr <= wr_ptr + size)
			return -EINVAL;

		wr_ptr += size;

		/* Wrap the write pointer at the end of the ring buffer. */
		if (wr_ptr > p_dec_info->stream_buf_end_addr) {
			u32 room = wr_ptr - p_dec_info->stream_buf_end_addr;

			wr_ptr = p_dec_info->stream_buf_start_addr;
			wr_ptr += room;
		} else if (wr_ptr == p_dec_info->stream_buf_end_addr) {
			wr_ptr = p_dec_info->stream_buf_start_addr;
		}

		p_dec_info->stream_wr_ptr = wr_ptr;
		p_dec_info->stream_rd_ptr = rd_ptr;
	}

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;
	/* size == 0 raises the stream-end flag in the firmware. */
	ret = wave5_vpu_dec_set_bitstream_flag(inst, (size == 0));
	mutex_unlock(&vpu_dev->hw_lock);

	return ret;
}
415 
416 int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, u32 *res_fail)
417 {
418 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
419 	int ret;
420 	struct vpu_device *vpu_dev = inst->dev;
421 
422 	if (p_dec_info->stride == 0) /* this means frame buffers have not been registered. */
423 		return -EINVAL;
424 
425 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
426 	if (ret)
427 		return ret;
428 
429 	ret = wave5_vpu_decode(inst, res_fail);
430 
431 	mutex_unlock(&vpu_dev->hw_lock);
432 
433 	return ret;
434 }
435 
436 int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr)
437 {
438 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
439 	int ret;
440 	struct vpu_device *vpu_dev = inst->dev;
441 
442 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
443 	if (ret)
444 		return ret;
445 
446 	ret = wave5_dec_set_rd_ptr(inst, addr);
447 
448 	p_dec_info->stream_rd_ptr = addr;
449 	if (update_wr_ptr)
450 		p_dec_info->stream_wr_ptr = addr;
451 
452 	mutex_unlock(&vpu_dev->hw_lock);
453 
454 	return ret;
455 }
456 
/*
 * Read the current bitstream read pointer from the hardware.
 *
 * NOTE(review): when the interruptible lock fails, the negative errno is
 * returned through the unsigned dma_addr_t return type; callers presumably
 * detect such values before using them as an address — confirm against
 * the call sites.
 */
dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst)
{
	int ret;
	dma_addr_t rd_ptr;

	ret = mutex_lock_interruptible(&inst->dev->hw_lock);
	if (ret)
		return ret;

	rd_ptr = wave5_dec_get_rd_ptr(inst);

	mutex_unlock(&inst->dev->hw_lock);

	return rd_ptr;
}
472 
/*
 * Collect the result of the last decode command from the firmware:
 * decoded/display frame indices, crop rectangles, stream pointers and
 * the display-flag bitmap. Also caches per-frame output info so display
 * geometry can be reported later for reordered frames.
 */
int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info)
{
	struct dec_info *p_dec_info;
	int ret;
	struct vpu_rect rect_info;
	u32 val;
	u32 decoded_index;
	u32 disp_idx;
	u32 max_dec_index;
	struct vpu_device *vpu_dev = inst->dev;
	struct dec_output_info *disp_info;

	if (!info)
		return -EINVAL;

	p_dec_info = &inst->codec_info->dec_info;

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;

	memset(info, 0, sizeof(*info));

	ret = wave5_vpu_dec_get_result(inst, info);
	if (ret) {
		/* Still report the current stream pointers on failure. */
		info->rd_ptr = p_dec_info->stream_rd_ptr;
		info->wr_ptr = p_dec_info->stream_wr_ptr;
		goto err_out;
	}

	decoded_index = info->index_frame_decoded;

	/* calculate display frame region */
	val = 0;
	rect_info.left = 0;
	rect_info.right = 0;
	rect_info.top = 0;
	rect_info.bottom = 0;

	if (decoded_index < WAVE5_MAX_FBS) {
		/* Only HEVC/AVC use the crop rectangle from the sequence info. */
		if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC)
			rect_info = p_dec_info->initial_info.pic_crop_rect;

		if (inst->std == W_HEVC_DEC)
			p_dec_info->dec_out_info[decoded_index].decoded_poc = info->decoded_poc;

		p_dec_info->dec_out_info[decoded_index].rc_decoded = rect_info;
	}
	info->rc_decoded = rect_info;

	disp_idx = info->index_frame_display;
	if (info->index_frame_display >= 0 && info->index_frame_display < WAVE5_MAX_FBS) {
		disp_info = &p_dec_info->dec_out_info[disp_idx];
		if (info->index_frame_display != info->index_frame_decoded) {
			/*
			 * when index_frame_decoded < 0, and index_frame_display >= 0
			 * info->dec_pic_width and info->dec_pic_height are still valid
			 * but those of p_dec_info->dec_out_info[disp_idx] are invalid in VP9
			 */
			info->disp_pic_width = disp_info->dec_pic_width;
			info->disp_pic_height = disp_info->dec_pic_height;
		} else {
			info->disp_pic_width = info->dec_pic_width;
			info->disp_pic_height = info->dec_pic_height;
		}

		info->rc_display = disp_info->rc_decoded;

	} else {
		/* Nothing to display: clear all display geometry. */
		info->rc_display.left = 0;
		info->rc_display.right = 0;
		info->rc_display.top = 0;
		info->rc_display.bottom = 0;
		info->disp_pic_width = 0;
		info->disp_pic_height = 0;
	}

	p_dec_info->stream_rd_ptr = wave5_dec_get_rd_ptr(inst);
	p_dec_info->frame_display_flag = vpu_read_reg(vpu_dev, W5_RET_DEC_DISP_IDC);

	val = p_dec_info->num_of_decoding_fbs; //fb_offset

	max_dec_index = (p_dec_info->num_of_decoding_fbs > p_dec_info->num_of_display_fbs) ?
		p_dec_info->num_of_decoding_fbs : p_dec_info->num_of_display_fbs;

	/* Display buffers are stored after the decoding buffers (offset val). */
	if (info->index_frame_display >= 0 &&
	    info->index_frame_display < (int)max_dec_index)
		info->disp_frame = inst->frame_buf[val + info->index_frame_display];

	info->rd_ptr = p_dec_info->stream_rd_ptr;
	info->wr_ptr = p_dec_info->stream_wr_ptr;
	info->frame_display_flag = p_dec_info->frame_display_flag;

	info->sequence_no = p_dec_info->initial_info.sequence_no;
	if (decoded_index < WAVE5_MAX_FBS)
		p_dec_info->dec_out_info[decoded_index] = *info;

	if (disp_idx < WAVE5_MAX_FBS)
		info->disp_frame.sequence_no = info->sequence_no;

	if (info->sequence_changed) {
		/* A new sequence was detected: adopt the new parameters. */
		memcpy((void *)&p_dec_info->initial_info, (void *)&p_dec_info->new_seq_info,
		       sizeof(struct dec_initial_info));
		p_dec_info->initial_info.sequence_no++;
	}

err_out:
	mutex_unlock(&vpu_dev->hw_lock);

	return ret;
}
584 
585 int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index)
586 {
587 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
588 	int ret;
589 	struct vpu_device *vpu_dev = inst->dev;
590 
591 	if (index >= p_dec_info->num_of_display_fbs)
592 		return -EINVAL;
593 
594 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
595 	if (ret)
596 		return ret;
597 	ret = wave5_dec_clr_disp_flag(inst, index);
598 	mutex_unlock(&vpu_dev->hw_lock);
599 
600 	return ret;
601 }
602 
603 int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index)
604 {
605 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
606 	int ret = 0;
607 	struct vpu_device *vpu_dev = inst->dev;
608 
609 	if (index >= p_dec_info->num_of_display_fbs)
610 		return -EINVAL;
611 
612 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
613 	if (ret)
614 		return ret;
615 	ret = wave5_dec_set_disp_flag(inst, index);
616 	mutex_unlock(&vpu_dev->hw_lock);
617 
618 	return ret;
619 }
620 
621 int wave5_vpu_dec_reset_framebuffer(struct vpu_instance *inst, unsigned int index)
622 {
623 	if (index >= MAX_REG_FRAME)
624 		return -EINVAL;
625 
626 	if (inst->frame_vbuf[index].size == 0)
627 		return -EINVAL;
628 
629 	wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[index]);
630 
631 	return 0;
632 }
633 
634 int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
635 {
636 	struct dec_info *p_dec_info = &inst->codec_info->dec_info;
637 	int ret = 0;
638 
639 	switch (cmd) {
640 	case DEC_GET_QUEUE_STATUS: {
641 		struct queue_status_info *queue_info = parameter;
642 
643 		queue_info->instance_queue_count = p_dec_info->instance_queue_count;
644 		queue_info->report_queue_count = p_dec_info->report_queue_count;
645 		break;
646 	}
647 	case DEC_RESET_FRAMEBUF_INFO: {
648 		int i;
649 
650 		for (i = 0; i < MAX_REG_FRAME; i++) {
651 			ret = wave5_vpu_dec_reset_framebuffer(inst, i);
652 			if (ret)
653 				break;
654 		}
655 
656 		for (i = 0; i < MAX_REG_FRAME; i++) {
657 			ret = reset_auxiliary_buffers(inst, i);
658 			if (ret)
659 				break;
660 		}
661 
662 		wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
663 		break;
664 	}
665 	case DEC_GET_SEQ_INFO: {
666 		struct dec_initial_info *seq_info = parameter;
667 
668 		*seq_info = p_dec_info->initial_info;
669 		break;
670 	}
671 
672 	default:
673 		return -EINVAL;
674 	}
675 
676 	return ret;
677 }
678 
679 int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param)
680 {
681 	struct enc_info *p_enc_info;
682 	int ret;
683 	struct vpu_device *vpu_dev = inst->dev;
684 
685 	ret = wave5_vpu_enc_check_open_param(inst, open_param);
686 	if (ret)
687 		return ret;
688 
689 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
690 	if (ret)
691 		return ret;
692 
693 	if (!wave5_vpu_is_init(vpu_dev)) {
694 		mutex_unlock(&vpu_dev->hw_lock);
695 		return -ENODEV;
696 	}
697 
698 	p_enc_info = &inst->codec_info->enc_info;
699 	p_enc_info->open_param = *open_param;
700 
701 	ret = wave5_vpu_build_up_enc_param(vpu_dev->dev, inst, open_param);
702 	mutex_unlock(&vpu_dev->hw_lock);
703 
704 	return ret;
705 }
706 
707 int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res)
708 {
709 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
710 	int ret;
711 	int retry = 0;
712 	struct vpu_device *vpu_dev = inst->dev;
713 	int inst_count = 0;
714 	struct vpu_instance *inst_elm;
715 
716 	*fail_res = 0;
717 	if (!inst->codec_info)
718 		return -EINVAL;
719 
720 	pm_runtime_resume_and_get(inst->dev->dev);
721 
722 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
723 	if (ret) {
724 		pm_runtime_resume_and_get(inst->dev->dev);
725 		return ret;
726 	}
727 
728 	do {
729 		ret = wave5_vpu_enc_finish_seq(inst, fail_res);
730 		if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
731 			dev_warn(inst->dev->dev, "enc_finish_seq timed out\n");
732 			pm_runtime_resume_and_get(inst->dev->dev);
733 			mutex_unlock(&vpu_dev->hw_lock);
734 			return ret;
735 		}
736 
737 		if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
738 		    retry++ >= MAX_FIRMWARE_CALL_RETRY) {
739 			pm_runtime_resume_and_get(inst->dev->dev);
740 			mutex_unlock(&vpu_dev->hw_lock);
741 			return -ETIMEDOUT;
742 		}
743 	} while (ret != 0);
744 
745 	dev_dbg(inst->dev->dev, "%s: enc_finish_seq complete\n", __func__);
746 
747 	wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work);
748 
749 	if (inst->std == W_HEVC_ENC || inst->std == W_AVC_ENC) {
750 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_sub_sam_buf);
751 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_mv);
752 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_y_tbl);
753 		wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_c_tbl);
754 	}
755 
756 	wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_task);
757 
758 	list_for_each_entry(inst_elm, &vpu_dev->instances, list)
759 		inst_count++;
760 	if (inst_count == 1)
761 		pm_runtime_dont_use_autosuspend(vpu_dev->dev);
762 
763 	mutex_unlock(&vpu_dev->hw_lock);
764 	pm_runtime_put_sync(inst->dev->dev);
765 
766 	return 0;
767 }
768 
/*
 * Validate and register @num reconstruction frame buffers of the given
 * geometry with the firmware. May only be called once per instance
 * (stride must still be zero) and only after SEQ_INIT has completed.
 */
int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
					unsigned int stride, int height,
					enum tiled_map_type map_type)
{
	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
	int ret;
	struct vpu_device *vpu_dev = inst->dev;
	unsigned int size_luma, size_chroma;
	int i;

	/* A non-zero stride means buffers were already registered. */
	if (p_enc_info->stride)
		return -EINVAL;

	if (!p_enc_info->initial_info_obtained)
		return -EINVAL;

	if (num < p_enc_info->initial_info.min_frame_buffer_count)
		return -EINVAL;

	if (stride == 0 || stride % 8 != 0)
		return -EINVAL;

	if (height <= 0)
		return -EINVAL;

	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
	if (ret)
		return ret;

	p_enc_info->num_frame_buffers = num;
	p_enc_info->stride = stride;

	size_luma = stride * height;
	/* Chroma plane: half-width stride, padded to a 16-byte multiple. */
	size_chroma = ALIGN(stride / 2, 16) * height;

	for (i = 0; i < num; i++) {
		if (!inst->frame_buf[i].update_fb_info)
			continue;

		/* Cb plane directly follows luma; no separate Cr plane. */
		inst->frame_buf[i].update_fb_info = false;
		inst->frame_buf[i].stride = stride;
		inst->frame_buf[i].height = height;
		inst->frame_buf[i].map_type = COMPRESSED_FRAME_MAP;
		inst->frame_buf[i].buf_y_size = size_luma;
		inst->frame_buf[i].buf_cb = inst->frame_buf[i].buf_y + size_luma;
		inst->frame_buf[i].buf_cb_size = size_chroma;
		inst->frame_buf[i].buf_cr_size = 0;
	}

	ret = wave5_vpu_enc_register_framebuffer(inst->dev->dev, inst, &inst->frame_buf[0],
						 COMPRESSED_FRAME_MAP,
						 p_enc_info->num_frame_buffers);

	mutex_unlock(&vpu_dev->hw_lock);

	return ret;
}
826 
827 static int wave5_check_enc_param(struct vpu_instance *inst, struct enc_param *param)
828 {
829 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
830 
831 	if (!param)
832 		return -EINVAL;
833 
834 	if (!param->source_frame)
835 		return -EINVAL;
836 
837 	if (p_enc_info->open_param.bit_rate == 0 && inst->std == W_HEVC_ENC) {
838 		if (param->pic_stream_buffer_addr % 16 || param->pic_stream_buffer_size == 0)
839 			return -EINVAL;
840 	}
841 	if (param->pic_stream_buffer_addr % 8 || param->pic_stream_buffer_size == 0)
842 		return -EINVAL;
843 
844 	return 0;
845 }
846 
847 int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param, u32 *fail_res)
848 {
849 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
850 	int ret;
851 	struct vpu_device *vpu_dev = inst->dev;
852 
853 	*fail_res = 0;
854 
855 	if (p_enc_info->stride == 0) /* this means frame buffers have not been registered. */
856 		return -EINVAL;
857 
858 	ret = wave5_check_enc_param(inst, param);
859 	if (ret)
860 		return ret;
861 
862 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
863 	if (ret)
864 		return ret;
865 
866 	p_enc_info->pts_map[param->src_idx] = param->pts;
867 
868 	ret = wave5_vpu_encode(inst, param, fail_res);
869 
870 	mutex_unlock(&vpu_dev->hw_lock);
871 
872 	return ret;
873 }
874 
875 int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info)
876 {
877 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
878 	int ret;
879 	struct vpu_device *vpu_dev = inst->dev;
880 
881 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
882 	if (ret)
883 		return ret;
884 
885 	ret = wave5_vpu_enc_get_result(inst, info);
886 	if (ret) {
887 		info->pts = 0;
888 		goto unlock;
889 	}
890 
891 	if (info->recon_frame_index >= 0)
892 		info->pts = p_enc_info->pts_map[info->enc_src_idx];
893 
894 unlock:
895 	mutex_unlock(&vpu_dev->hw_lock);
896 
897 	return ret;
898 }
899 
900 int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
901 {
902 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
903 
904 	switch (cmd) {
905 	case ENABLE_ROTATION:
906 		p_enc_info->rotation_enable = true;
907 		break;
908 	case ENABLE_MIRRORING:
909 		p_enc_info->mirror_enable = true;
910 		break;
911 	case SET_MIRROR_DIRECTION: {
912 		enum mirror_direction mir_dir;
913 
914 		mir_dir = *(enum mirror_direction *)parameter;
915 		if (mir_dir != MIRDIR_NONE && mir_dir != MIRDIR_HOR &&
916 		    mir_dir != MIRDIR_VER && mir_dir != MIRDIR_HOR_VER)
917 			return -EINVAL;
918 		p_enc_info->mirror_direction = mir_dir;
919 		break;
920 	}
921 	case SET_ROTATION_ANGLE: {
922 		int angle;
923 
924 		angle = *(int *)parameter;
925 		if (angle && angle != 90 && angle != 180 && angle != 270)
926 			return -EINVAL;
927 		if (p_enc_info->initial_info_obtained && (angle == 90 || angle == 270))
928 			return -EINVAL;
929 		p_enc_info->rotation_angle = angle;
930 		break;
931 	}
932 	case ENC_GET_QUEUE_STATUS: {
933 		struct queue_status_info *queue_info = parameter;
934 
935 		queue_info->instance_queue_count = p_enc_info->instance_queue_count;
936 		queue_info->report_queue_count = p_enc_info->report_queue_count;
937 		break;
938 	}
939 	default:
940 		return -EINVAL;
941 	}
942 	return 0;
943 }
944 
945 int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst)
946 {
947 	int ret;
948 	struct vpu_device *vpu_dev = inst->dev;
949 
950 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
951 	if (ret)
952 		return ret;
953 
954 	ret = wave5_vpu_enc_init_seq(inst);
955 
956 	mutex_unlock(&vpu_dev->hw_lock);
957 
958 	return ret;
959 }
960 
961 int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info)
962 {
963 	struct enc_info *p_enc_info = &inst->codec_info->enc_info;
964 	int ret;
965 	struct vpu_device *vpu_dev = inst->dev;
966 
967 	if (!info)
968 		return -EINVAL;
969 
970 	ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
971 	if (ret)
972 		return ret;
973 
974 	ret = wave5_vpu_enc_get_seq_info(inst, info);
975 	if (ret) {
976 		p_enc_info->initial_info_obtained = false;
977 		mutex_unlock(&vpu_dev->hw_lock);
978 		return ret;
979 	}
980 
981 	p_enc_info->initial_info_obtained = true;
982 	p_enc_info->initial_info = *info;
983 
984 	mutex_unlock(&vpu_dev->hw_lock);
985 
986 	return 0;
987 }
988