/* xref: /linux/drivers/media/platform/qcom/iris/iris_vpu_common.c (revision ca220141fa8ebae09765a242076b2b77338106b0) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  */
5 
6 #include <linux/iopoll.h>
7 #include <linux/pm_opp.h>
8 #include <linux/reset.h>
9 
10 #include "iris_core.h"
11 #include "iris_instance.h"
12 #include "iris_vpu_common.h"
13 #include "iris_vpu_register_defines.h"
14 
15 #define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL	(AON_BASE_OFFS + 0x2C)
16 #define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS	(AON_BASE_OFFS + 0x30)
17 
18 #define CTRL_INIT				(CPU_CS_BASE_OFFS + 0x48)
19 #define CTRL_STATUS				(CPU_CS_BASE_OFFS + 0x4C)
20 
21 #define CTRL_INIT_IDLE_MSG_BMSK			0x40000000
22 #define CTRL_ERROR_STATUS__M			0xfe
23 #define CTRL_STATUS_PC_READY			0x100
24 
25 #define QTBL_INFO				(CPU_CS_BASE_OFFS + 0x50)
26 #define QTBL_ENABLE				BIT(0)
27 
28 #define QTBL_ADDR				(CPU_CS_BASE_OFFS + 0x54)
29 #define CPU_CS_SCIACMDARG3			(CPU_CS_BASE_OFFS + 0x58)
30 #define SFR_ADDR				(CPU_CS_BASE_OFFS + 0x5C)
31 #define UC_REGION_ADDR				(CPU_CS_BASE_OFFS + 0x64)
32 #define UC_REGION_SIZE				(CPU_CS_BASE_OFFS + 0x68)
33 
34 static void iris_vpu_interrupt_init(struct iris_core *core)
35 {
36 	u32 mask_val;
37 
38 	mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
39 	mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
40 		      WRAPPER_INTR_MASK_A2HCPU_BMSK);
41 	writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
42 }
43 
/*
 * Program the firmware-visible shared-memory (uc_region) layout: region
 * base and size, queue-table address, queue-table enable, and the
 * optional SFR address, then run the platform bootup-register hook.
 */
static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
	u32 queue_size, value;
	const struct vpu_ops *vpu_ops = core->iris_platform_data->vpu_ops;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
		(IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);

	/* The region starts at the interface queue table. */
	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + UC_REGION_ADDR);

	/* Iris hardware requires 1M queue alignment */
	value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
	writel(value, core->reg_base + UC_REGION_SIZE);

	/* Queue table lives at the region base as well. */
	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + QTBL_ADDR);

	writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);

	/* SFR is optional; its address is biased by the platform core_arch value. */
	if (core->sfr_daddr) {
		value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
		writel(value, core->reg_base + SFR_ADDR);
	}

	/* Optional platform-specific bootup register programming. */
	if (vpu_ops->program_bootup_registers)
		vpu_ops->program_bootup_registers(core);
}
73 
74 int iris_vpu_boot_firmware(struct iris_core *core)
75 {
76 	u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;
77 
78 	iris_vpu_setup_ucregion_memory_map(core);
79 
80 	writel(ctrl_init, core->reg_base + CTRL_INIT);
81 	writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);
82 
83 	while (!ctrl_status && count < max_tries) {
84 		ctrl_status = readl(core->reg_base + CTRL_STATUS);
85 		if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
86 			dev_err(core->dev, "invalid setting for uc_region\n");
87 			break;
88 		}
89 
90 		usleep_range(50, 100);
91 		count++;
92 	}
93 
94 	if (count >= max_tries) {
95 		dev_err(core->dev, "error booting up iris firmware\n");
96 		return -ETIME;
97 	}
98 
99 	writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
100 	writel(0x0, core->reg_base + CPU_CS_X2RPMH);
101 
102 	return 0;
103 }
104 
105 void iris_vpu_raise_interrupt(struct iris_core *core)
106 {
107 	writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
108 }
109 
110 void iris_vpu_clear_interrupt(struct iris_core *core)
111 {
112 	u32 intr_status, mask;
113 
114 	intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
115 	mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
116 		WRAPPER_INTR_STATUS_A2HWD_BMSK |
117 		CTRL_INIT_IDLE_MSG_BMSK);
118 
119 	if (intr_status & mask)
120 		core->intr_status |= intr_status;
121 
122 	writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
123 }
124 
125 int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
126 {
127 	if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
128 		dev_err(core->dev, "received watchdog interrupt\n");
129 		return -ETIME;
130 	}
131 
132 	return 0;
133 }
134 
135 int iris_vpu_prepare_pc(struct iris_core *core)
136 {
137 	u32 wfi_status, idle_status, pc_ready;
138 	u32 ctrl_status, val = 0;
139 	int ret;
140 
141 	ctrl_status = readl(core->reg_base + CTRL_STATUS);
142 	pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
143 	idle_status = ctrl_status & BIT(30);
144 	if (pc_ready)
145 		return 0;
146 
147 	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
148 	wfi_status &= BIT(0);
149 	if (!wfi_status || !idle_status)
150 		goto skip_power_off;
151 
152 	ret = core->hfi_ops->sys_pc_prep(core);
153 	if (ret)
154 		goto skip_power_off;
155 
156 	ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
157 				 val & CTRL_STATUS_PC_READY, 250, 2500);
158 	if (ret)
159 		goto skip_power_off;
160 
161 	ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
162 				 val, val & BIT(0), 250, 2500);
163 	if (ret)
164 		goto skip_power_off;
165 
166 	return 0;
167 
168 skip_power_off:
169 	ctrl_status = readl(core->reg_base + CTRL_STATUS);
170 	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
171 	wfi_status &= BIT(0);
172 	dev_err(core->dev, "skip power collapse, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
173 		wfi_status, idle_status, pc_ready, ctrl_status);
174 
175 	return -EAGAIN;
176 }
177 
/**
 * iris_vpu_power_off_controller() - Power down the controller subsystem.
 * @core: iris core context
 *
 * Requests low-power (LPI) state on the NoC interfaces, idles the debug
 * bridge, halts the AXI clocks and pulses the FIFO reset, then disables
 * the controller clocks and power domain.  The clock/power teardown at
 * disable_power runs even when an LPI handshake times out.
 *
 * Return: always 0.
 */
int iris_vpu_power_off_controller(struct iris_core *core)
{
	u32 val = 0;
	int ret;

	/* Mask the Tensilica-side signalling toward RPMh while powering down. */
	writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);

	if (!core->iris_platform_data->no_aon) {
		/* Request low-power state on the AON wrapper video NoC. */
		writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);

		ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
					 val, val & BIT(0), 200, 2000);
		if (ret)
			goto disable_power;
	}

	/* Request low-power state on the CPU NoC and wait for the ack. */
	writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	/* Clear the debug-bridge LPI request and wait until it reports idle. */
	writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
				 val, val == 0, 200, 2000);
	if (ret)
		goto disable_power;

	/* Halt the AXI clocks and pulse the QNS4PDX FIFO reset. */
	writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
	       core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
	writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);

disable_power:
	iris_disable_unprepare_clock(core, IRIS_AHB_CLK);
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return 0;
}
222 
/*
 * Power off the video hardware core: return the power domain to
 * software-controlled mode, disable it, then drop the hardware clocks.
 */
void iris_vpu_power_off_hw(struct iris_core *core)
{
	dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	iris_disable_unprepare_clock(core, IRIS_HW_AHB_CLK);
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
}
230 
/*
 * Full VPU power-down: drop the clock-rate vote, power off the hardware
 * core then the controller, and release the interconnect bandwidth vote.
 * The IRQ is disabled only when no watchdog interrupt has been recorded
 * in core->intr_status.
 */
void iris_vpu_power_off(struct iris_core *core)
{
	iris_opp_set_rate(core->dev, 0);
	core->iris_platform_data->vpu_ops->power_off_hw(core);
	core->iris_platform_data->vpu_ops->power_off_controller(core);
	iris_unset_icc_bw(core);

	if (!iris_vpu_watchdog(core, core->intr_status))
		disable_irq_nosync(core->irq);
}
241 
/**
 * iris_vpu_power_on_controller() - Power up the controller subsystem.
 * @core: iris core context
 *
 * Enables the controller power domain, pulses the bulk resets, then
 * enables the AXI, CTRL and AHB clocks in that order.  On failure,
 * everything enabled so far is unwound in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
int iris_vpu_power_on_controller(struct iris_core *core)
{
	u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
	if (ret)
		goto err_disable_axi_clock;

	ret = iris_prepare_enable_clock(core, IRIS_AHB_CLK);
	/* The AHB clock is treated as optional: -ENOENT is tolerated. */
	if (ret && ret != -ENOENT)
		goto err_disable_ctrl_clock;

	return 0;

err_disable_ctrl_clock:
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
err_disable_axi_clock:
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return ret;
}
278 
/**
 * iris_vpu_power_on_hw() - Power up the video hardware core.
 * @core: iris core context
 *
 * Enables the hardware power domain and clocks (the AHB clock is treated
 * as optional: -ENOENT is tolerated), then switches the power domain to
 * hardware-controlled mode.  On failure, everything enabled so far is
 * unwound in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
int iris_vpu_power_on_hw(struct iris_core *core)
{
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_HW_AHB_CLK);
	if (ret && ret != -ENOENT)
		goto err_disable_hw_clock;

	/* Let the hardware manage its own power domain from here on. */
	ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
	if (ret)
		goto err_disable_hw_ahb_clock;

	return 0;

err_disable_hw_ahb_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_AHB_CLK);
err_disable_hw_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);

	return ret;
}
310 
/**
 * iris_vpu35_vpu4x_power_off_controller() - VPU3.5/VPU4x controller power-down.
 * @core: iris core context
 *
 * Variant of the controller power-off sequence: parks the CPU NoC in low
 * power, performs the MNoC LPI handshake with retries, idles the debug
 * bridge, then disables the controller clocks and power domain and pulses
 * the bulk resets.  The teardown at disable_power runs even when an LPI
 * handshake times out.
 *
 * Return: always 0.
 */
int iris_vpu35_vpu4x_power_off_controller(struct iris_core *core)
{
	u32 clk_rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
	bool handshake_done, handshake_busy;
	u32 count = 0, val = 0;
	int ret;

	/* Mask the Tensilica-side signalling toward RPMh while powering down. */
	writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);

	/* Request low-power state on the CPU NoC and wait for the ack. */
	writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(0, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	/* Retry up to 1000 times as recommended by hardware documentation */
	do {
		/* set MNoC to low power */
		writel(REQ_POWER_DOWN_PREP, core->reg_base +
		       AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
		usleep_range(10, 20);
		val = readl(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS);

		handshake_done = val & NOC_LPI_STATUS_DONE;
		handshake_busy = val & (NOC_LPI_STATUS_DENY | NOC_LPI_STATUS_ACTIVE);

		/* Done, or neither done nor busy: stop retrying. */
		if (handshake_done || !handshake_busy)
			break;

		/* Withdraw the request before retrying the handshake. */
		writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
		usleep_range(10, 20);

	} while (++count < 1000);

	if (!handshake_done && handshake_busy)
		dev_err(core->dev, "LPI handshake timeout\n");

	/* Wait for the MNoC LPI status done bit before clearing the request. */
	ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);

	/* Idle the debug bridge; best-effort, a poll timeout is ignored. */
	writel(0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);

	readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
			   val, val == 0, 200, 2000);

disable_power:
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
	iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
	iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);

	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	reset_control_bulk_reset(clk_rst_tbl_size, core->resets);

	return 0;
}
374 
/**
 * iris_vpu35_vpu4x_power_on_controller() - VPU3.5/VPU4x controller power-up.
 * @core: iris core context
 *
 * Enables the controller power domain, then the AXI1, free-running CTRL
 * and CTRL clocks in that order, unwinding everything enabled so far on
 * failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
int iris_vpu35_vpu4x_power_on_controller(struct iris_core *core)
{
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = iris_prepare_enable_clock(core, IRIS_AXI1_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_FREERUN_CLK);
	if (ret)
		goto err_disable_axi1_clk;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
	if (ret)
		goto err_disable_ctrl_free_clk;

	return 0;

err_disable_ctrl_free_clk:
	iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
err_disable_axi1_clk:
	iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return ret;
}
406 
/*
 * VPU3.5/VPU4x hook called via vpu_ops->program_bootup_registers from
 * iris_vpu_setup_ucregion_memory_map(): sets the wrapper spare register.
 */
void iris_vpu35_vpu4x_program_bootup_registers(struct iris_core *core)
{
	writel(0x1, core->reg_base + WRAPPER_IRIS_VCODEC_VPU_WRAPPER_SPARE_0);
}
411 
412 u64 iris_vpu3x_vpu4x_calculate_frequency(struct iris_inst *inst, size_t data_size)
413 {
414 	struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
415 	struct v4l2_format *inp_f = inst->fmt_src;
416 	u32 height, width, mbs_per_second, mbpf;
417 	u64 fw_cycles, fw_vpp_cycles;
418 	u64 vsp_cycles, vpp_cycles;
419 	u32 fps = DEFAULT_FPS;
420 
421 	width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
422 	height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
423 
424 	mbpf = NUM_MBS_PER_FRAME(height, width);
425 	mbs_per_second = mbpf * fps;
426 
427 	fw_cycles = fps * caps->mb_cycles_fw;
428 	fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
429 
430 	vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
431 	/* 21 / 20 is minimum overhead factor */
432 	vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
433 
434 	/* 1.059 is multi-pipe overhead */
435 	if (inst->fw_caps[PIPE].value > 1)
436 		vpp_cycles += div_u64(vpp_cycles * 59, 1000);
437 
438 	vsp_cycles = fps * data_size * 8;
439 	vsp_cycles = div_u64(vsp_cycles, 2);
440 	/* VSP FW overhead 1.05 */
441 	vsp_cycles = div_u64(vsp_cycles * 21, 20);
442 
443 	if (inst->fw_caps[STAGE].value == STAGE_1)
444 		vsp_cycles = vsp_cycles * 3;
445 
446 	return max3(vpp_cycles, vsp_cycles, fw_cycles);
447 }
448 
/**
 * iris_vpu_power_on() - Full power-up sequence for the VPU.
 * @core: iris core context
 *
 * Votes maximum interconnect bandwidth, powers on the controller and the
 * hardware core, sets the clock rate, programs the preset registers, and
 * finally unmasks and enables the interrupt.  On failure, the steps
 * already completed are unwound in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
int iris_vpu_power_on(struct iris_core *core)
{
	u32 freq;
	int ret;

	ret = iris_set_icc_bw(core, INT_MAX);
	if (ret)
		goto err;

	ret = core->iris_platform_data->vpu_ops->power_on_controller(core);
	if (ret)
		goto err_unvote_icc;

	ret = core->iris_platform_data->vpu_ops->power_on_hw(core);
	if (ret)
		goto err_power_off_ctrl;

	/* Use the cached rate, or request the maximum when none is cached yet. */
	freq = core->power.clk_freq ? core->power.clk_freq :
				      (u32)ULONG_MAX;

	iris_opp_set_rate(core->dev, freq);

	core->iris_platform_data->set_preset_registers(core);

	iris_vpu_interrupt_init(core);
	core->intr_status = 0;
	enable_irq(core->irq);

	return 0;

err_power_off_ctrl:
	core->iris_platform_data->vpu_ops->power_off_controller(core);
err_unvote_icc:
	iris_unset_icc_bw(core);
err:
	dev_err(core->dev, "power on failed\n");

	return ret;
}
488