xref: /linux/drivers/media/platform/qcom/venus/hfi_venus.c (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2017 Linaro Ltd.
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 
15 #include "core.h"
16 #include "hfi_cmds.h"
17 #include "hfi_msgs.h"
18 #include "hfi_venus.h"
19 #include "hfi_venus_io.h"
20 #include "firmware.h"
21 
/* Bitfields of the 32-bit queue-header "type" word shared with firmware */
#define HFI_MASK_QHDR_TX_TYPE		0xff000000
#define HFI_MASK_QHDR_RX_TYPE		0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE		0x0000ff00
#define HFI_MASK_QHDR_ID_TYPE		0x000000ff

/* Queue identifiers OR-ed into the queue header type field at init time */
#define HFI_HOST_TO_CTRL_CMD_Q		0
#define HFI_CTRL_TO_HOST_MSG_Q		1
#define HFI_CTRL_TO_HOST_DBG_Q		2
#define HFI_MASK_QHDR_STATUS		0x000000ff

/* Three shared interface queues: command (host->fw), message and debug */
#define IFACEQ_NUM			3
#define IFACEQ_CMD_IDX			0
#define IFACEQ_MSG_IDX			1
#define IFACEQ_DBG_IDX			2
#define IFACEQ_MAX_BUF_COUNT		50
#define IFACEQ_MAX_PARALLEL_CLNTS	16
#define IFACEQ_DFLT_QHDR		0x01010000

#define POLL_INTERVAL_US		50

/* Packet size bounds (bytes) used for queue sizing and scratch buffers */
#define IFACEQ_MAX_PKT_SIZE		1024
#define IFACEQ_MED_PKT_SIZE		768
#define IFACEQ_MIN_PKT_SIZE		8
#define IFACEQ_VAR_SMALL_PKT_SIZE	100
#define IFACEQ_VAR_LARGE_PKT_SIZE	512
#define IFACEQ_VAR_HUGE_PKT_SIZE	(1024 * 12)
48 
/*
 * Header placed at the start of the shared interface-queue memory region.
 * Layout is part of the host/firmware ABI — do not reorder or resize fields.
 */
struct hfi_queue_table_header {
	u32 version;
	u32 size;		/* total size of this table in bytes */
	u32 qhdr0_offset;	/* byte offset of the first queue header */
	u32 qhdr_size;		/* size of one struct hfi_queue_header */
	u32 num_q;		/* number of queue headers that follow */
	u32 num_active_q;
};
57 
/*
 * Per-queue control block shared with the firmware. The host and firmware
 * communicate through read_idx/write_idx (in dwords) plus the rx/tx request
 * flags; layout is ABI — do not reorder or resize fields.
 */
struct hfi_queue_header {
	u32 status;
	u32 start_addr;		/* device address of the queue ring buffer */
	u32 type;		/* IFACEQ_DFLT_QHDR | queue id */
	u32 q_size;		/* ring size in dwords */
	u32 pkt_size;
	u32 pkt_drop_cnt;
	u32 rx_wm;
	u32 tx_wm;
	u32 rx_req;		/* receiver asks for an interrupt on new data */
	u32 tx_req;		/* sender asks for an interrupt on free space */
	u32 rx_irq_status;
	u32 tx_irq_status;
	u32 read_idx;		/* consumer index, dwords */
	u32 write_idx;		/* producer index, dwords */
};
74 
/* Size of the queue table header plus all per-queue headers */
#define IFACEQ_TABLE_SIZE	\
	(sizeof(struct hfi_queue_table_header) +	\
	 sizeof(struct hfi_queue_header) * IFACEQ_NUM)

/* Ring buffer size of one interface queue, in bytes */
#define IFACEQ_QUEUE_SIZE	(IFACEQ_MAX_PKT_SIZE *	\
	IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)

/* Pointer to the i-th queue header inside the shared table at ptr */
#define IFACEQ_GET_QHDR_START_ADDR(ptr, i)	\
	(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +	\
		((i) * sizeof(struct hfi_queue_header)))

#define QDSS_SIZE		SZ_4K
#define SFR_SIZE		SZ_4K
#define QUEUE_SIZE		\
	(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))

/* All shared regions are 4K aligned; the whole UC region is 1M aligned */
#define ALIGNED_QDSS_SIZE	ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE	ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE	ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE		ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
				      ALIGNED_QDSS_SIZE, SZ_1M)
96 
/* A DMA-coherent allocation handed to the firmware (see venus_alloc()) */
struct mem_desc {
	dma_addr_t da;	/* device address */
	void *kva;	/* kernel virtual address */
	u32 size;	/* allocated size, 4K aligned */
	unsigned long attrs;	/* dma_alloc_attrs() attributes */
};

/* One shared interface queue: its header and its ring buffer memory */
struct iface_queue {
	struct hfi_queue_header *qhdr;
	struct mem_desc qmem;
};

/* Firmware reachability; DEINIT blocks all queue traffic */
enum venus_state {
	VENUS_STATE_DEINIT = 1,
	VENUS_STATE_INIT,
};
113 
/* Driver-private HFI state attached to a venus_core instance */
struct venus_hfi_device {
	struct venus_core *core;
	u32 irq_status;		/* wrapper status latched by venus_isr() */
	u32 last_packet_type;	/* type of last command written to cmd queue */
	bool power_enabled;
	bool suspended;
	enum venus_state state;
	/* serialize read / write to the shared memory */
	struct mutex lock;
	struct completion pwr_collapse_prep;	/* HFI_MSG_SYS_PC_PREP ack */
	struct completion release_resource;	/* HFI_MSG_SYS_RELEASE_RESOURCE ack */
	struct mem_desc ifaceq_table;	/* queue table + all ring buffers */
	struct mem_desc sfr;		/* firmware status/crash record */
	struct iface_queue queues[IFACEQ_NUM];
	u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* scratch for msg queue reads */
	u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* scratch for debug queue reads */
};
131 
/* Hex-dump every packet read from / written to the shared queues */
static bool venus_pkt_debug;
/* Firmware debug message level; non-static, referenced from other files */
int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_fw_low_power_mode = true;
/* Timeout (ms) when waiting for a firmware response completion */
static int venus_hw_rsp_timeout = 1000;
static bool venus_fw_coverage;
137 
138 static void venus_set_state(struct venus_hfi_device *hdev,
139 			    enum venus_state state)
140 {
141 	mutex_lock(&hdev->lock);
142 	hdev->state = state;
143 	mutex_unlock(&hdev->lock);
144 }
145 
146 static bool venus_is_valid_state(struct venus_hfi_device *hdev)
147 {
148 	return hdev->state != VENUS_STATE_DEINIT;
149 }
150 
151 static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
152 {
153 	size_t pkt_size = *(u32 *)packet;
154 
155 	if (!venus_pkt_debug)
156 		return;
157 
158 	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
159 		       pkt_size, true);
160 }
161 
/*
 * venus_write_queue() - copy one HFI packet into a shared ring buffer
 * @hdev:	HFI device
 * @queue:	destination queue (normally the command queue)
 * @packet:	packet to write; first dword is its size in bytes
 * @rx_req:	out: 1 if the firmware asked to be interrupted on new data
 *
 * Indices are in dwords. The packet wraps around the ring end if needed.
 * Ordering of the index reads/writes relative to the data copy is enforced
 * with explicit barriers because the firmware accesses the same memory.
 *
 * Return: 0 on success, -EINVAL on a malformed queue/packet, -ENOSPC when
 * the ring has no room (tx_req is set so the firmware signals free space).
 */
static int venus_write_queue(struct venus_hfi_device *hdev,
			     struct iface_queue *queue,
			     void *packet, u32 *rx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_wr_idx;
	u32 empty_space, rd_idx, wr_idx, qsize;
	u32 *wr_ptr;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	venus_dump_packet(hdev, packet);

	/* packet length in dwords; zero-length packets are invalid */
	dwords = (*(u32 *)packet) >> 2;
	if (!dwords)
		return -EINVAL;

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;
	/* ensure rd/wr indices's are read from memory */
	rmb();

	/* qsize is firmware-writable; reject anything beyond the allocation */
	if (qsize > IFACEQ_QUEUE_SIZE / 4)
		return -EINVAL;

	if (wr_idx >= rd_idx)
		empty_space = qsize - (wr_idx - rd_idx);
	else
		empty_space = rd_idx - wr_idx;

	if (empty_space <= dwords) {
		/* ask the firmware to interrupt us when space frees up */
		qhdr->tx_req = 1;
		/* ensure tx_req is updated in memory */
		wmb();
		return -ENOSPC;
	}

	qhdr->tx_req = 0;
	/* ensure tx_req is updated in memory */
	wmb();

	new_wr_idx = wr_idx + dwords;
	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));

	/* defend against a corrupted write index pointing outside the ring */
	if (wr_ptr < (u32 *)queue->qmem.kva ||
	    wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
		return -EINVAL;

	if (new_wr_idx < qsize) {
		memcpy(wr_ptr, packet, dwords << 2);
	} else {
		/* the packet wraps: split the copy at the ring end */
		size_t len;

		new_wr_idx -= qsize;
		len = (dwords - new_wr_idx) << 2;
		memcpy(wr_ptr, packet, len);
		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
	}

	/* make sure packet is written before updating the write index */
	wmb();

	qhdr->write_idx = new_wr_idx;
	*rx_req = qhdr->rx_req ? 1 : 0;

	/* make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}
238 
/*
 * venus_read_queue() - copy one HFI packet out of a shared ring buffer
 * @hdev:	HFI device
 * @queue:	source queue (message or debug)
 * @pkt:	destination buffer, at least IFACEQ_VAR_HUGE_PKT_SIZE bytes
 * @tx_req:	out: 1 if the firmware asked to be interrupted after a read
 *
 * Validates the firmware-provided packet length against the scratch buffer
 * size and the ring bounds; a bad packet is dropped by fast-forwarding the
 * read index to the write index.
 *
 * Return: 0 on success, -ENODATA when the ring is empty, -EINVAL on a
 * malformed queue/packet, -EBADMSG when a bad packet was dropped.
 */
static int venus_read_queue(struct venus_hfi_device *hdev,
			    struct iface_queue *queue, void *pkt, u32 *tx_req)
{
	struct hfi_pkt_hdr *pkt_hdr = NULL;
	struct hfi_queue_header *qhdr;
	u32 dwords, new_rd_idx;
	u32 rd_idx, wr_idx, type, qsize;
	u32 *rd_ptr;
	u32 recv_request = 0;
	int ret = 0;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	type = qhdr->type;
	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;

	/* qsize is firmware-writable; reject anything beyond the allocation */
	if (qsize > IFACEQ_QUEUE_SIZE / 4)
		return -EINVAL;

	/* make sure data is valid before using it */
	rmb();

	/*
	 * Do not set receive request for debug queue, if set, Venus generates
	 * interrupt for debug messages even when there is no response message
	 * available. In general debug queue will not become full as it is being
	 * emptied out for every interrupt from Venus. Venus will anyway
	 * generates interrupt if it is full.
	 */
	if (type & HFI_CTRL_TO_HOST_MSG_Q)
		recv_request = 1;

	if (rd_idx == wr_idx) {
		/* ring empty: re-arm the rx request and report no data */
		qhdr->rx_req = recv_request;
		*tx_req = 0;
		/* update rx_req field in memory */
		wmb();
		return -ENODATA;
	}

	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));

	/* defend against a corrupted read index pointing outside the ring */
	if (rd_ptr < (u32 *)queue->qmem.kva ||
	    rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
		return -EINVAL;

	/* first dword of the packet is its length in bytes */
	dwords = *rd_ptr >> 2;
	if (!dwords)
		return -EINVAL;

	new_rd_idx = rd_idx + dwords;
	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
		if (new_rd_idx < qsize) {
			memcpy(pkt, rd_ptr, dwords << 2);
		} else {
			/* the packet wraps: split the copy at the ring end */
			size_t len;

			new_rd_idx -= qsize;
			len = (dwords - new_rd_idx) << 2;
			memcpy(pkt, rd_ptr, len);
			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
		}
		/* cross-check: the copied header must agree on the length */
		pkt_hdr = (struct hfi_pkt_hdr *)(pkt);
		if ((pkt_hdr->size >> 2) != dwords)
			return -EINVAL;
	} else {
		/* bad packet received, dropping */
		new_rd_idx = qhdr->write_idx;
		ret = -EBADMSG;
	}

	/* ensure the packet is read before updating read index */
	rmb();

	qhdr->read_idx = new_rd_idx;
	/* ensure updating read index */
	wmb();

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	/* ensure rd/wr indices are read from memory */
	rmb();

	/* only re-arm the rx request once the ring has been fully drained */
	if (rd_idx != wr_idx)
		qhdr->rx_req = 0;
	else
		qhdr->rx_req = recv_request;

	*tx_req = qhdr->tx_req ? 1 : 0;

	/* ensure rx_req is stored to memory and tx_req is loaded from memory */
	mb();

	venus_dump_packet(hdev, pkt);

	return ret;
}
343 
344 static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
345 		       u32 size)
346 {
347 	struct device *dev = hdev->core->dev;
348 
349 	desc->attrs = DMA_ATTR_WRITE_COMBINE;
350 	desc->size = ALIGN(size, SZ_4K);
351 
352 	desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
353 				    desc->attrs);
354 	if (!desc->kva)
355 		return -ENOMEM;
356 
357 	return 0;
358 }
359 
360 static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
361 {
362 	struct device *dev = hdev->core->dev;
363 
364 	dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
365 }
366 
367 static void venus_set_registers(struct venus_hfi_device *hdev)
368 {
369 	const struct venus_resources *res = hdev->core->res;
370 	const struct reg_val *tbl = res->reg_tbl;
371 	unsigned int count = res->reg_tbl_size;
372 	unsigned int i;
373 
374 	for (i = 0; i < count; i++)
375 		writel(tbl[i].value, hdev->core->base + tbl[i].reg);
376 }
377 
378 static void venus_soft_int(struct venus_hfi_device *hdev)
379 {
380 	void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
381 	u32 clear_bit;
382 
383 	if (IS_V6(hdev->core) || (IS_V4(hdev->core) && is_lite(hdev->core)))
384 		clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
385 	else
386 		clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
387 
388 	writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
389 }
390 
/*
 * venus_iface_cmdq_write_nolock() - queue a command packet for the firmware
 * @hdev:	HFI device
 * @pkt:	HFI packet, first dword holds its length in bytes
 * @sync:	true for synchronous commands the caller will wait on
 *
 * Caller must hold hdev->lock. For sync commands the message queue rx_req
 * is set so the firmware raises an interrupt when the response arrives.
 *
 * Return: 0 on success, -EINVAL in DEINIT state, or the write error.
 */
static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
					 void *pkt, bool sync)
{
	struct device *dev = hdev->core->dev;
	struct hfi_pkt_hdr *cmd_packet;
	struct iface_queue *queue;
	u32 rx_req;
	int ret;

	if (!venus_is_valid_state(hdev))
		return -EINVAL;

	/* remember the last command type, e.g. for error reporting */
	cmd_packet = (struct hfi_pkt_hdr *)pkt;
	hdev->last_packet_type = cmd_packet->pkt_type;

	queue = &hdev->queues[IFACEQ_CMD_IDX];

	ret = venus_write_queue(hdev, queue, pkt, &rx_req);
	if (ret) {
		dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
		return ret;
	}

	if (sync) {
		/*
		 * Inform video hardware to raise interrupt for synchronous
		 * commands
		 */
		queue = &hdev->queues[IFACEQ_MSG_IDX];
		queue->qhdr->rx_req = 1;
		/* ensure rx_req is updated in memory */
		wmb();
	}

	/* poke the firmware only if it requested it for this queue */
	if (rx_req)
		venus_soft_int(hdev);

	return 0;
}
430 
431 static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
432 {
433 	int ret;
434 
435 	mutex_lock(&hdev->lock);
436 	ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
437 	mutex_unlock(&hdev->lock);
438 
439 	return ret;
440 }
441 
442 static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
443 				       u32 size, u32 addr, void *cookie)
444 {
445 	struct venus_hfi_device *hdev = to_hfi_priv(core);
446 	struct hfi_sys_set_resource_pkt *pkt;
447 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
448 	int ret;
449 
450 	if (id == VIDC_RESOURCE_NONE)
451 		return 0;
452 
453 	pkt = (struct hfi_sys_set_resource_pkt *)packet;
454 
455 	ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
456 	if (ret)
457 		return ret;
458 
459 	ret = venus_iface_cmdq_write(hdev, pkt, false);
460 	if (ret)
461 		return ret;
462 
463 	return 0;
464 }
465 
/*
 * venus_boot_core() - bring the video core CPU out of reset
 *
 * Unmasks the wrapper interrupts appropriate for the IP version, kicks the
 * controller init sequence and polls CPU_CS_SCIACMDARG0 until the firmware
 * reports a non-zero control status or max_tries attempts elapse.
 *
 * Return: 0 on success, -EINVAL if the firmware reports a bad UC_REGION
 * setting, -ETIMEDOUT when the core never comes up.
 */
static int venus_boot_core(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	static const unsigned int max_tries = 100;
	u32 ctrl_status = 0, mask_val = 0;
	unsigned int count = 0;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	void __iomem *wrapper_base = hdev->core->wrapper_base;
	int ret = 0;

	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
		/* IRIS2: clear (enable) only the A2H watchdog and CPU bits */
		mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
		mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
			      WRAPPER_INTR_MASK_A2HCPU_MASK);
	} else {
		mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
	}

	writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
	if (IS_V1(hdev->core))
		writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);

	/* start the controller init sequence and wait for a status change */
	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
	while (!ctrl_status && count < max_tries) {
		ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
			dev_err(dev, "invalid setting for UC_REGION\n");
			ret = -EINVAL;
			break;
		}

		usleep_range(500, 1000);
		count++;
	}

	if (count >= max_tries)
		ret = -ETIMEDOUT;

	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core)) {
		/* enable host-to-XTSS soft interrupts on V6-class IPs */
		writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);

		if (!IS_AR50_LITE(hdev->core))
			writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
	}

	return ret;
}
513 
514 static u32 venus_hwversion(struct venus_hfi_device *hdev)
515 {
516 	struct device *dev = hdev->core->dev;
517 	void __iomem *wrapper_base = hdev->core->wrapper_base;
518 	u32 ver;
519 	u32 major, minor, step;
520 
521 	ver = readl(wrapper_base + WRAPPER_HW_VERSION);
522 	major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
523 	major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
524 	minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
525 	minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
526 	step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
527 
528 	dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
529 
530 	return major;
531 }
532 
/*
 * venus_run() - (re)program shared-memory addresses and boot the core
 *
 * Called after every power-up: regulator toggling resets the registers
 * written here, so the register table and the UC region / queue table /
 * SFR addresses must be reprogrammed before booting the core CPU.
 */
static int venus_run(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	int ret;

	/*
	 * Re-program all of the registers that get reset as a result of
	 * regulator_disable() and _enable()
	 */
	venus_set_registers(hdev);

	/* tell the firmware where the shared queue table lives */
	writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
	writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
	writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
	writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
	/* the SFR buffer is optional (allocation may have failed) */
	if (hdev->sfr.da)
		writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);

	ret = venus_boot_core(hdev);
	if (ret) {
		dev_err(dev, "failed to reset venus core\n");
		return ret;
	}

	venus_hwversion(hdev);

	return 0;
}
562 
/*
 * venus_halt_axi() - quiesce the core's bus interfaces before power-off
 *
 * The halt sequence depends on the IP generation:
 *  - AR50-lite: nothing to do;
 *  - IRIS2/IRIS2.1: LPI handshake on X2RPMH, the AON MVP NOC (IRIS2 only)
 *    and the debug bridge;
 *  - V4: CPU AXI halt via the wrapper;
 *  - older IPs: VBIF AXI halt request/ack.
 *
 * Return: 0 on success, -ETIMEDOUT (or poll error) when an ack never comes.
 */
static int venus_halt_axi(struct venus_hfi_device *hdev)
{
	void __iomem *wrapper_base = hdev->core->wrapper_base;
	void __iomem *vbif_base = hdev->core->vbif_base;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	void __iomem *aon_base = hdev->core->aon_base;
	struct device *dev = hdev->core->dev;
	u32 val;
	u32 mask_val;
	int ret;

	if (IS_AR50_LITE(hdev->core))
		return 0;

	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
		writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);

		/* IRIS2.1 has no AON MVP NOC LPI handshake */
		if (IS_IRIS2_1(hdev->core))
			goto skip_aon_mvp_noc;

		writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
		ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
					 val,
					 val & BIT(0),
					 POLL_INTERVAL_US,
					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
		if (ret)
			return -ETIMEDOUT;

skip_aon_mvp_noc:
		mask_val = (BIT(2) | BIT(1) | BIT(0));
		writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);

		/* release the debug bridge and wait for the status to clear */
		writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
		ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
					 val,
					 val == 0,
					 POLL_INTERVAL_US,
					 VBIF_AXI_HALT_ACK_TIMEOUT_US);

		if (ret) {
			dev_err(dev, "DBLP Release: lpi_status %x\n", val);
			return -ETIMEDOUT;
		}
		return 0;
	}

	if (IS_V4(hdev->core)) {
		val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
		val |= WRAPPER_CPU_AXI_HALT_HALT;
		writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);

		ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
					 val,
					 val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
					 POLL_INTERVAL_US,
					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
		if (ret) {
			dev_err(dev, "AXI bus port halt timeout\n");
			return ret;
		}

		return 0;
	}

	/* Halt AXI and AXI IMEM VBIF Access */
	val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
	val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
	writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);

	/* Request for AXI bus port halt */
	ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
				 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
				 POLL_INTERVAL_US,
				 VBIF_AXI_HALT_ACK_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "AXI bus port halt timeout\n");
		return ret;
	}

	return 0;
}
645 
646 static int venus_power_off(struct venus_hfi_device *hdev)
647 {
648 	int ret;
649 
650 	if (!hdev->power_enabled)
651 		return 0;
652 
653 	ret = venus_set_hw_state_suspend(hdev->core);
654 	if (ret)
655 		return ret;
656 
657 	ret = venus_halt_axi(hdev);
658 	if (ret)
659 		return ret;
660 
661 	hdev->power_enabled = false;
662 
663 	return 0;
664 }
665 
666 static int venus_power_on(struct venus_hfi_device *hdev)
667 {
668 	int ret;
669 
670 	if (hdev->power_enabled)
671 		return 0;
672 
673 	ret = venus_set_hw_state_resume(hdev->core);
674 	if (ret)
675 		goto err;
676 
677 	ret = venus_run(hdev);
678 	if (ret)
679 		goto err_suspend;
680 
681 	hdev->power_enabled = true;
682 
683 	return 0;
684 
685 err_suspend:
686 	venus_set_hw_state_suspend(hdev->core);
687 err:
688 	hdev->power_enabled = false;
689 	return ret;
690 }
691 
692 static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
693 					void *pkt)
694 {
695 	struct iface_queue *queue;
696 	u32 tx_req;
697 	int ret;
698 
699 	if (!venus_is_valid_state(hdev))
700 		return -EINVAL;
701 
702 	queue = &hdev->queues[IFACEQ_MSG_IDX];
703 
704 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
705 	if (ret)
706 		return ret;
707 
708 	if (tx_req)
709 		venus_soft_int(hdev);
710 
711 	return 0;
712 }
713 
714 static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
715 {
716 	int ret;
717 
718 	mutex_lock(&hdev->lock);
719 	ret = venus_iface_msgq_read_nolock(hdev, pkt);
720 	mutex_unlock(&hdev->lock);
721 
722 	return ret;
723 }
724 
725 static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
726 					void *pkt)
727 {
728 	struct iface_queue *queue;
729 	u32 tx_req;
730 	int ret;
731 
732 	ret = venus_is_valid_state(hdev);
733 	if (!ret)
734 		return -EINVAL;
735 
736 	queue = &hdev->queues[IFACEQ_DBG_IDX];
737 
738 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
739 	if (ret)
740 		return ret;
741 
742 	if (tx_req)
743 		venus_soft_int(hdev);
744 
745 	return 0;
746 }
747 
748 static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
749 {
750 	int ret;
751 
752 	if (!pkt)
753 		return -EINVAL;
754 
755 	mutex_lock(&hdev->lock);
756 	ret = venus_iface_dbgq_read_nolock(hdev, pkt);
757 	mutex_unlock(&hdev->lock);
758 
759 	return ret;
760 }
761 
/*
 * Reset a queue header to its pristine, empty-queue state. start_addr and
 * pkt_drop_cnt are deliberately left untouched — the caller fills in
 * start_addr after this returns.
 */
static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
{
	qhdr->status = 1;
	qhdr->type = IFACEQ_DFLT_QHDR;
	qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;	/* ring size in dwords */
	qhdr->pkt_size = 0;
	qhdr->rx_wm = 1;
	qhdr->tx_wm = 1;
	qhdr->rx_req = 1;
	qhdr->tx_req = 0;
	qhdr->rx_irq_status = 0;
	qhdr->tx_irq_status = 0;
	qhdr->read_idx = 0;
	qhdr->write_idx = 0;
}
777 
/*
 * Free the shared queue table and SFR buffer and clear all queue state,
 * under the shared-memory lock so no reader/writer races the teardown.
 */
static void venus_interface_queues_release(struct venus_hfi_device *hdev)
{
	mutex_lock(&hdev->lock);

	venus_free(hdev, &hdev->ifaceq_table);
	venus_free(hdev, &hdev->sfr);

	/* clear descriptors so stale kva/da pointers can't be reused */
	memset(hdev->queues, 0, sizeof(hdev->queues));
	memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
	memset(&hdev->sfr, 0, sizeof(hdev->sfr));

	mutex_unlock(&hdev->lock);
}
791 
/*
 * venus_interface_queues_init() - allocate and lay out the shared queues
 *
 * Allocates one DMA buffer holding the queue table header, the three queue
 * headers and the three ring buffers, initializes every header, then
 * allocates the (optional) SFR buffer. SFR allocation failure is not fatal:
 * hdev->sfr.da stays 0 and venus_run() simply skips programming SFR_ADDR.
 *
 * Return: 0 on success, -ENOMEM when the queue allocation fails.
 */
static int venus_interface_queues_init(struct venus_hfi_device *hdev)
{
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	struct mem_desc desc = {0};
	unsigned int offset;
	unsigned int i;
	int ret;

	ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
	if (ret)
		return ret;

	hdev->ifaceq_table = desc;
	/* ring buffers start right after the table and queue headers */
	offset = IFACEQ_TABLE_SIZE;

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qmem.da = desc.da + offset;
		queue->qmem.kva = desc.kva + offset;
		queue->qmem.size = IFACEQ_QUEUE_SIZE;
		offset += queue->qmem.size;
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		/* firmware addresses the ring by its device address */
		queue->qhdr->start_addr = queue->qmem.da;

		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
	if (ret) {
		/* best-effort: run without a firmware status record */
		hdev->sfr.da = 0;
	} else {
		hdev->sfr = desc;
		sfr = hdev->sfr.kva;
		sfr->buf_size = ALIGNED_SFR_SIZE;
	}

	/* ensure table and queue header structs are settled in memory */
	wmb();

	return 0;
}
859 
860 static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
861 {
862 	struct hfi_sys_set_property_pkt *pkt;
863 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
864 
865 	pkt = (struct hfi_sys_set_property_pkt *)packet;
866 
867 	pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
868 
869 	return venus_iface_cmdq_write(hdev, pkt, false);
870 }
871 
872 static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
873 {
874 	struct hfi_sys_set_property_pkt *pkt;
875 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
876 
877 	pkt = (struct hfi_sys_set_property_pkt *)packet;
878 
879 	pkt_sys_coverage_config(pkt, mode);
880 
881 	return venus_iface_cmdq_write(hdev, pkt, false);
882 }
883 
884 static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
885 				      bool enable)
886 {
887 	struct hfi_sys_set_property_pkt *pkt;
888 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
889 
890 	if (!enable)
891 		return 0;
892 
893 	pkt = (struct hfi_sys_set_property_pkt *)packet;
894 
895 	pkt_sys_idle_indicator(pkt, enable);
896 
897 	return venus_iface_cmdq_write(hdev, pkt, false);
898 }
899 
900 static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
901 				       bool enable)
902 {
903 	struct hfi_sys_set_property_pkt *pkt;
904 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
905 
906 	pkt = (struct hfi_sys_set_property_pkt *)packet;
907 
908 	pkt_sys_power_control(pkt, enable);
909 
910 	return venus_iface_cmdq_write(hdev, pkt, false);
911 }
912 
913 static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
914 {
915 	struct hfi_sys_set_property_pkt *pkt;
916 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
917 	const struct venus_resources *res = hdev->core->res;
918 	int ret;
919 
920 	pkt = (struct hfi_sys_set_property_pkt *)packet;
921 
922 	pkt_sys_ubwc_config(pkt, res->ubwc_conf);
923 
924 	ret = venus_iface_cmdq_write(hdev, pkt, false);
925 	if (ret)
926 		return ret;
927 
928 	return 0;
929 }
930 
/*
 * venus_get_queue_size() - number of pending dwords in a shared queue
 *
 * Return: distance between read and write indices, or -EINVAL for a bad
 * index or missing queue header.
 *
 * NOTE(review): read_idx/write_idx are u32 shared with firmware; their
 * difference is converted to int for abs(), which is only well-behaved
 * while the queue holds fewer than INT_MAX dwords — true for the
 * IFACEQ_QUEUE_SIZE used here, but worth confirming if sizes ever grow.
 */
static int venus_get_queue_size(struct venus_hfi_device *hdev,
				unsigned int index)
{
	struct hfi_queue_header *qhdr;

	if (index >= IFACEQ_NUM)
		return -EINVAL;

	qhdr = hdev->queues[index].qhdr;
	if (!qhdr)
		return -EINVAL;

	return abs(qhdr->read_idx - qhdr->write_idx);
}
945 
/*
 * venus_sys_set_default_properties() - push default SYS properties to firmware
 *
 * Failures of the individual property writes are only warned about; the
 * function returns the status of the last attempted write.
 *
 * NOTE(review): because only the final ret is returned, a failure of the
 * power-control (or, when ubwc_conf is set, the UBWC) write propagates to
 * the caller while earlier failures do not — presumably intentional since
 * UBWC is described as mandatory below; confirm before relying on it.
 */
static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	const struct venus_resources *res = hdev->core->res;
	int ret;

	ret = venus_sys_set_debug(hdev, venus_fw_debug);
	if (ret)
		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);

	/* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
	if (IS_V1(hdev->core)) {
		ret = venus_sys_set_idle_message(hdev, false);
		if (ret)
			dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
	}

	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
	if (ret)
		dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
			 ret);

	/* For specific venus core, it is mandatory to set the UBWC configuration */
	if (res->ubwc_conf) {
		ret = venus_sys_set_ubwc_config(hdev);
		if (ret)
			dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
	}

	return ret;
}
977 
978 static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
979 {
980 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
981 	struct hfi_session_pkt pkt;
982 
983 	pkt_session_cmd(&pkt, pkt_type, inst);
984 
985 	return venus_iface_cmdq_write(hdev, &pkt, sync);
986 }
987 
988 static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
989 {
990 	struct device *dev = hdev->core->dev;
991 	void *packet = hdev->dbg_buf;
992 
993 	while (!venus_iface_dbgq_read(hdev, packet)) {
994 		struct hfi_msg_sys_coverage_pkt *pkt = packet;
995 
996 		if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
997 			struct hfi_msg_sys_debug_pkt *pkt = packet;
998 
999 			dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
1000 		}
1001 	}
1002 }
1003 
1004 static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
1005 					bool wait)
1006 {
1007 	unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
1008 	struct hfi_sys_pc_prep_pkt pkt;
1009 	int ret;
1010 
1011 	init_completion(&hdev->pwr_collapse_prep);
1012 
1013 	pkt_sys_pc_prep(&pkt);
1014 
1015 	ret = venus_iface_cmdq_write(hdev, &pkt, false);
1016 	if (ret)
1017 		return ret;
1018 
1019 	if (!wait)
1020 		return 0;
1021 
1022 	ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
1023 	if (!ret) {
1024 		venus_flush_debug_queue(hdev);
1025 		return -ETIMEDOUT;
1026 	}
1027 
1028 	return 0;
1029 }
1030 
1031 static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1032 {
1033 	int ret1, ret2;
1034 
1035 	ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1036 	if (ret1 < 0)
1037 		return ret1;
1038 
1039 	ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
1040 	if (ret2 < 0)
1041 		return ret2;
1042 
1043 	if (!ret1 && !ret2)
1044 		return 1;
1045 
1046 	return 0;
1047 }
1048 
1049 static void venus_sfr_print(struct venus_hfi_device *hdev)
1050 {
1051 	struct device *dev = hdev->core->dev;
1052 	struct hfi_sfr *sfr = hdev->sfr.kva;
1053 	u32 size;
1054 	void *p;
1055 
1056 	if (!sfr)
1057 		return;
1058 
1059 	size = sfr->buf_size;
1060 	if (!size)
1061 		return;
1062 
1063 	if (size > ALIGNED_SFR_SIZE)
1064 		size = ALIGNED_SFR_SIZE;
1065 
1066 	p = memchr(sfr->data, '\0', size);
1067 	/*
1068 	 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1069 	 * that Venus is in the process of crashing.
1070 	 */
1071 	if (!p)
1072 		sfr->data[size - 1] = '\0';
1073 
1074 	dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
1075 }
1076 
1077 static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1078 					void *packet)
1079 {
1080 	struct hfi_msg_event_notify_pkt *event_pkt = packet;
1081 
1082 	if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1083 		return;
1084 
1085 	venus_set_state(hdev, VENUS_STATE_DEINIT);
1086 
1087 	venus_sfr_print(hdev);
1088 }
1089 
/*
 * venus_isr_thread() - threaded half of the Venus interrupt handler
 *
 * Drains the message queue, dispatching on the packet type returned by
 * hfi_process_msg_packet(): SYS_ERROR handling, late resource hand-over
 * after SYS_INIT, and the two completions other paths block on. Finally
 * flushes any pending firmware debug messages.
 */
static irqreturn_t venus_isr_thread(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	const struct venus_resources *res;
	void *pkt;
	u32 msg_ret;

	if (!hdev)
		return IRQ_NONE;

	res = hdev->core->res;
	pkt = hdev->pkt_buf;


	while (!venus_iface_msgq_read(hdev, pkt)) {
		msg_ret = hfi_process_msg_packet(core, pkt);
		switch (msg_ret) {
		case HFI_MSG_EVENT_NOTIFY:
			venus_process_msg_sys_error(hdev, pkt);
			break;
		case HFI_MSG_SYS_INIT:
			/* firmware is up: hand over the video memory region */
			venus_hfi_core_set_resource(core, res->vmem_id,
						    res->vmem_size,
						    res->vmem_addr,
						    hdev);
			break;
		case HFI_MSG_SYS_RELEASE_RESOURCE:
			complete(&hdev->release_resource);
			break;
		case HFI_MSG_SYS_PC_PREP:
			/* unblocks venus_prepare_power_collapse() */
			complete(&hdev->pwr_collapse_prep);
			break;
		default:
			break;
		}
	}

	venus_flush_debug_queue(hdev);

	return IRQ_HANDLED;
}
1131 
/*
 * venus_isr() - hard half of the Venus interrupt handler
 *
 * Latches the wrapper interrupt status (mask bits differ per IP version)
 * into hdev->irq_status for the threaded handler, acknowledges the
 * ARM-to-host soft interrupt and, on pre-IRIS2/non-lite IPs, clears the
 * wrapper status register as well.
 */
static irqreturn_t venus_isr(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	u32 status;
	void __iomem *cpu_cs_base;
	void __iomem *wrapper_base;

	if (!hdev)
		return IRQ_NONE;

	cpu_cs_base = hdev->core->cpu_cs_base;
	wrapper_base = hdev->core->wrapper_base;

	status = readl(wrapper_base + WRAPPER_INTR_STATUS);

	/* latch the status only when one of the known cause bits is set */
	if (IS_AR50_LITE(core)) {
		if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
		    status & WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE ||
		    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
			hdev->irq_status = status;
	} else if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
		if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
		    status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
		    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
			hdev->irq_status = status;
	} else {
		if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
		    status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
		    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
			hdev->irq_status = status;
	}
	/* acknowledge the ARM-to-host soft interrupt */
	writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
	if (!(IS_IRIS2(core) || IS_IRIS2_1(core) || IS_AR50_LITE(core)))
		writel(status, wrapper_base + WRAPPER_INTR_CLEAR);

	return IRQ_WAKE_THREAD;
}
1169 
1170 static int venus_core_init(struct venus_core *core)
1171 {
1172 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1173 	struct device *dev = core->dev;
1174 	struct hfi_sys_get_property_pkt version_pkt;
1175 	struct hfi_sys_init_pkt pkt;
1176 	int ret;
1177 
1178 	pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1179 
1180 	venus_set_state(hdev, VENUS_STATE_INIT);
1181 
1182 	ret = venus_iface_cmdq_write(hdev, &pkt, false);
1183 	if (ret)
1184 		return ret;
1185 
1186 	pkt_sys_image_version(&version_pkt);
1187 
1188 	ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1189 	if (ret)
1190 		dev_warn(dev, "failed to send image version pkt to fw\n");
1191 
1192 	ret = venus_sys_set_default_properties(hdev);
1193 	if (ret)
1194 		return ret;
1195 
1196 	return 0;
1197 }
1198 
/*
 * Tear down the HFI layer host-side only: move the state machine to
 * DEINIT and mark the core as suspended/unpowered. No command is sent
 * to the firmware here.
 */
static int venus_core_deinit(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);

	venus_set_state(hdev, VENUS_STATE_DEINIT);
	/* from now on suspend/resume logic treats power as already off */
	hdev->suspended = true;
	hdev->power_enabled = false;

	return 0;
}
1209 
1210 static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1211 {
1212 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1213 	struct hfi_sys_test_ssr_pkt pkt;
1214 	int ret;
1215 
1216 	ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1217 	if (ret)
1218 		return ret;
1219 
1220 	return venus_iface_cmdq_write(hdev, &pkt, false);
1221 }
1222 
1223 static int venus_session_init(struct venus_inst *inst, u32 session_type,
1224 			      u32 codec)
1225 {
1226 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1227 	struct hfi_session_init_pkt pkt;
1228 	int ret;
1229 
1230 	ret = venus_sys_set_debug(hdev, venus_fw_debug);
1231 	if (ret)
1232 		goto err;
1233 
1234 	ret = pkt_session_init(&pkt, inst, session_type, codec);
1235 	if (ret)
1236 		goto err;
1237 
1238 	ret = venus_iface_cmdq_write(hdev, &pkt, true);
1239 	if (ret)
1240 		goto err;
1241 
1242 	return 0;
1243 
1244 err:
1245 	venus_flush_debug_queue(hdev);
1246 	return ret;
1247 }
1248 
1249 static int venus_session_end(struct venus_inst *inst)
1250 {
1251 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1252 	struct device *dev = hdev->core->dev;
1253 
1254 	if (venus_fw_coverage) {
1255 		if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1256 			dev_warn(dev, "fw coverage msg ON failed\n");
1257 	}
1258 
1259 	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
1260 }
1261 
/* Abort a firmware session, draining pending fw debug messages first. */
static int venus_session_abort(struct venus_inst *inst)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);

	venus_flush_debug_queue(hdev);

	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
}
1270 
1271 static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1272 {
1273 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1274 	struct hfi_session_flush_pkt pkt;
1275 	int ret;
1276 
1277 	ret = pkt_session_flush(&pkt, inst, flush_mode);
1278 	if (ret)
1279 		return ret;
1280 
1281 	return venus_iface_cmdq_write(hdev, &pkt, true);
1282 }
1283 
/* Start streaming on an initialized session. */
static int venus_session_start(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
}
1288 
/* Stop streaming on a running session. */
static int venus_session_stop(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
}
1293 
/* Resume a session paused by a firmware event (no reply is awaited). */
static int venus_session_continue(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
}
1298 
1299 static int venus_session_etb(struct venus_inst *inst,
1300 			     struct hfi_frame_data *in_frame)
1301 {
1302 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1303 	u32 session_type = inst->session_type;
1304 	int ret;
1305 
1306 	if (session_type == VIDC_SESSION_TYPE_DEC) {
1307 		struct hfi_session_empty_buffer_compressed_pkt pkt;
1308 
1309 		ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1310 		if (ret)
1311 			return ret;
1312 
1313 		ret = venus_iface_cmdq_write(hdev, &pkt, false);
1314 	} else if (session_type == VIDC_SESSION_TYPE_ENC) {
1315 		struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1316 
1317 		ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1318 		if (ret)
1319 			return ret;
1320 
1321 		ret = venus_iface_cmdq_write(hdev, &pkt, false);
1322 	} else {
1323 		ret = -EINVAL;
1324 	}
1325 
1326 	return ret;
1327 }
1328 
1329 static int venus_session_ftb(struct venus_inst *inst,
1330 			     struct hfi_frame_data *out_frame)
1331 {
1332 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1333 	struct hfi_session_fill_buffer_pkt pkt;
1334 	int ret;
1335 
1336 	ret = pkt_session_ftb(&pkt, inst, out_frame);
1337 	if (ret)
1338 		return ret;
1339 
1340 	return venus_iface_cmdq_write(hdev, &pkt, false);
1341 }
1342 
1343 static int venus_session_set_buffers(struct venus_inst *inst,
1344 				     struct hfi_buffer_desc *bd)
1345 {
1346 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1347 	struct hfi_session_set_buffers_pkt *pkt;
1348 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1349 	int ret;
1350 
1351 	if (bd->buffer_type == HFI_BUFFER_INPUT)
1352 		return 0;
1353 
1354 	pkt = (struct hfi_session_set_buffers_pkt *)packet;
1355 
1356 	ret = pkt_session_set_buffers(pkt, inst, bd);
1357 	if (ret)
1358 		return ret;
1359 
1360 	return venus_iface_cmdq_write(hdev, pkt, false);
1361 }
1362 
1363 static int venus_session_unset_buffers(struct venus_inst *inst,
1364 				       struct hfi_buffer_desc *bd)
1365 {
1366 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1367 	struct hfi_session_release_buffer_pkt *pkt;
1368 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1369 	int ret;
1370 
1371 	if (bd->buffer_type == HFI_BUFFER_INPUT)
1372 		return 0;
1373 
1374 	pkt = (struct hfi_session_release_buffer_pkt *)packet;
1375 
1376 	ret = pkt_session_unset_buffers(pkt, inst, bd);
1377 	if (ret)
1378 		return ret;
1379 
1380 	return venus_iface_cmdq_write(hdev, pkt, true);
1381 }
1382 
/* Ask the firmware to acquire the resources needed by the session. */
static int venus_session_load_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
}
1387 
/* Ask the firmware to release the resources held by the session. */
static int venus_session_release_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
}
1392 
1393 static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1394 				       u32 seq_hdr_len)
1395 {
1396 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1397 	struct hfi_session_parse_sequence_header_pkt *pkt;
1398 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1399 	int ret;
1400 
1401 	pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1402 
1403 	ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1404 	if (ret)
1405 		return ret;
1406 
1407 	ret = venus_iface_cmdq_write(hdev, pkt, false);
1408 	if (ret)
1409 		return ret;
1410 
1411 	return 0;
1412 }
1413 
1414 static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1415 				     u32 seq_hdr_len)
1416 {
1417 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1418 	struct hfi_session_get_sequence_header_pkt *pkt;
1419 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1420 	int ret;
1421 
1422 	pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1423 
1424 	ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1425 	if (ret)
1426 		return ret;
1427 
1428 	return venus_iface_cmdq_write(hdev, pkt, false);
1429 }
1430 
1431 static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1432 				      void *pdata)
1433 {
1434 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1435 	struct hfi_session_set_property_pkt *pkt;
1436 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1437 	int ret;
1438 
1439 	pkt = (struct hfi_session_set_property_pkt *)packet;
1440 
1441 	ret = pkt_session_set_property(pkt, inst, ptype, pdata);
1442 	if (ret == -ENOTSUPP)
1443 		return 0;
1444 	if (ret)
1445 		return ret;
1446 
1447 	return venus_iface_cmdq_write(hdev, pkt, false);
1448 }
1449 
1450 static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1451 {
1452 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1453 	struct hfi_session_get_property_pkt pkt;
1454 	int ret;
1455 
1456 	ret = pkt_session_get_property(&pkt, inst, ptype);
1457 	if (ret)
1458 		return ret;
1459 
1460 	return venus_iface_cmdq_write(hdev, &pkt, true);
1461 }
1462 
1463 static int venus_resume(struct venus_core *core)
1464 {
1465 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1466 	int ret = 0;
1467 
1468 	mutex_lock(&hdev->lock);
1469 
1470 	if (!hdev->suspended)
1471 		goto unlock;
1472 
1473 	ret = venus_power_on(hdev);
1474 
1475 unlock:
1476 	if (!ret)
1477 		hdev->suspended = false;
1478 
1479 	mutex_unlock(&hdev->lock);
1480 
1481 	return ret;
1482 }
1483 
/*
 * Power-collapse sequence for Venus 1xx/2xx: send PC_PREP to the
 * firmware, then verify the hardware actually reached the PC_READY
 * state before cutting power. Any deviation aborts the suspend with
 * -EINVAL so runtime PM retries later.
 */
static int venus_suspend_1xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	u32 ctrl_status;
	int ret;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/* sends HFI_CMD_SYS_PC_PREP and waits for the firmware ack */
	ret = venus_prepare_power_collapse(hdev, true);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	mutex_lock(&hdev->lock);

	/* another command raced in after PC_PREP; abort the collapse */
	if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* queues must be drained before power can be removed */
	ret = venus_are_queues_empty(hdev);
	if (ret < 0 || !ret) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* hardware must report PC_READY before power-off is safe */
	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ret = venus_power_off(hdev);
	if (ret) {
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
1541 
1542 static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1543 {
1544 	void __iomem *wrapper_base = hdev->core->wrapper_base;
1545 	void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1546 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1547 	u32 ctrl_status, cpu_status;
1548 
1549 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
1550 		cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1551 	else
1552 		cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1553 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1554 
1555 	if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1556 	    ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1557 		return true;
1558 
1559 	return false;
1560 }
1561 
1562 static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1563 {
1564 	void __iomem *wrapper_base = hdev->core->wrapper_base;
1565 	void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1566 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1567 	u32 ctrl_status, cpu_status;
1568 
1569 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
1570 		cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1571 	else
1572 		cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1573 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1574 
1575 	if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1576 	    ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1577 		return true;
1578 
1579 	return false;
1580 }
1581 
/*
 * Power-collapse sequence for Venus 3xx and newer: wait for the ARM9
 * and video core to go idle, prepare power collapse (register-based,
 * no HFI command), wait for PC_READY, then cut power.
 */
static int venus_suspend_3xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
	u32 ctrl_status;
	bool val;
	int ret;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/* firmware already signalled PC_READY; skip straight to power-off */
	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
	if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
		goto power_off;

	/*
	 * Power collapse sequence for Venus 3xx and 4xx versions:
	 * 1. Check for ARM9 and video core to be idle by checking WFI bit
	 *    (bit 0) in CPU status register and by checking Idle (bit 30) in
	 *    Control status register for video core.
	 * 2. Send a command to prepare for power collapse.
	 * 3. Check for WFI and PC_READY bits.
	 */
	ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
				 1500, 100 * 1500);
	if (ret) {
		dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
		return ret;
	}

	ret = venus_prepare_power_collapse(hdev, false);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
				 1500, 100 * 1500);
	if (ret)
		return ret;

power_off:
	mutex_lock(&hdev->lock);

	ret = venus_power_off(hdev);
	if (ret) {
		dev_err(dev, "venus_power_off (%d)\n", ret);
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
1649 
/* Dispatch to the generation-specific power-collapse sequence. */
static int venus_suspend(struct venus_core *core)
{
	return (IS_V3(core) || IS_V4(core) || IS_V6(core)) ?
	       venus_suspend_3xx(core) : venus_suspend_1xx(core);
}
1657 
/* HFI backend operations exposed to the core through core->ops. */
static const struct hfi_ops venus_hfi_ops = {
	.core_init			= venus_core_init,
	.core_deinit			= venus_core_deinit,
	.core_trigger_ssr		= venus_core_trigger_ssr,

	.session_init			= venus_session_init,
	.session_end			= venus_session_end,
	.session_abort			= venus_session_abort,
	.session_flush			= venus_session_flush,
	.session_start			= venus_session_start,
	.session_stop			= venus_session_stop,
	.session_continue		= venus_session_continue,
	.session_etb			= venus_session_etb,
	.session_ftb			= venus_session_ftb,
	.session_set_buffers		= venus_session_set_buffers,
	.session_unset_buffers		= venus_session_unset_buffers,
	.session_load_res		= venus_session_load_res,
	.session_release_res		= venus_session_release_res,
	.session_parse_seq_hdr		= venus_session_parse_seq_hdr,
	.session_get_seq_hdr		= venus_session_get_seq_hdr,
	.session_set_property		= venus_session_set_property,
	.session_get_property		= venus_session_get_property,

	.resume				= venus_resume,
	.suspend			= venus_suspend,

	.isr				= venus_isr,
	.isr_thread			= venus_isr_thread,
};
1687 
/*
 * Tear down the HFI device: clear core->priv first so concurrent
 * lookups see no device, release the interface queue memory, free the
 * device, then silence the interrupt and detach the ops.
 */
void venus_hfi_destroy(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);

	core->priv = NULL;
	venus_interface_queues_release(hdev);
	mutex_destroy(&hdev->lock);
	kfree(hdev);
	/* disable_irq() only uses core->irq, safe after hdev is freed */
	disable_irq(core->irq);
	core->ops = NULL;
}
1699 
1700 int venus_hfi_create(struct venus_core *core)
1701 {
1702 	struct venus_hfi_device *hdev;
1703 	int ret;
1704 
1705 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1706 	if (!hdev)
1707 		return -ENOMEM;
1708 
1709 	mutex_init(&hdev->lock);
1710 
1711 	hdev->core = core;
1712 	hdev->suspended = true;
1713 	core->priv = hdev;
1714 	core->ops = &venus_hfi_ops;
1715 
1716 	ret = venus_interface_queues_init(hdev);
1717 	if (ret)
1718 		goto err_kfree;
1719 
1720 	return 0;
1721 
1722 err_kfree:
1723 	kfree(hdev);
1724 	core->priv = NULL;
1725 	core->ops = NULL;
1726 	return ret;
1727 }
1728 
/*
 * Re-initialize the shared interface queue headers and table after a
 * firmware reload (e.g. system suspend or recovery), reusing the
 * already-allocated queue memory.
 */
void venus_hfi_queues_reinit(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	unsigned int i;

	mutex_lock(&hdev->lock);

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		/* device address of the queue ring buffer */
		queue->qhdr->start_addr = queue->qmem.da;

		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	/* rebuild the queue table header the firmware parses on boot */
	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	/* subsystem failure reason buffer shared with the firmware */
	sfr = hdev->sfr.kva;
	sfr->buf_size = ALIGNED_SFR_SIZE;

	/* ensure table and queue header structs are settled in memory */
	wmb();

	mutex_unlock(&hdev->lock);
}
1779