// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>

#include "iris_core.h"
#include "iris_vpu_common.h"
#include "iris_vpu_register_defines.h"

#define WRAPPER_TZ_BASE_OFFS			0x000C0000
#define AON_BASE_OFFS				0x000E0000

#define CPU_IC_BASE_OFFS			(CPU_BASE_OFFS)

#define CPU_CS_A2HSOFTINTCLR			(CPU_CS_BASE_OFFS + 0x1C)
#define CLEAR_XTENSA2HOST_INTR			BIT(0)

#define CTRL_INIT				(CPU_CS_BASE_OFFS + 0x48)
#define CTRL_STATUS				(CPU_CS_BASE_OFFS + 0x4C)

#define CTRL_INIT_IDLE_MSG_BMSK			0x40000000
#define CTRL_ERROR_STATUS__M			0xfe
#define CTRL_STATUS_PC_READY			0x100

#define QTBL_INFO				(CPU_CS_BASE_OFFS + 0x50)
#define QTBL_ENABLE				BIT(0)

#define QTBL_ADDR				(CPU_CS_BASE_OFFS + 0x54)
#define CPU_CS_SCIACMDARG3			(CPU_CS_BASE_OFFS + 0x58)
#define SFR_ADDR				(CPU_CS_BASE_OFFS + 0x5C)
#define UC_REGION_ADDR				(CPU_CS_BASE_OFFS + 0x64)
#define UC_REGION_SIZE				(CPU_CS_BASE_OFFS + 0x68)

#define CPU_CS_H2XSOFTINTEN			(CPU_CS_BASE_OFFS + 0x148)
#define HOST2XTENSA_INTR_ENABLE			BIT(0)

#define CPU_CS_X2RPMH				(CPU_CS_BASE_OFFS + 0x168)
#define MSK_SIGNAL_FROM_TENSILICA		BIT(0)
#define MSK_CORE_POWER_ON			BIT(1)

#define CPU_IC_SOFTINT				(CPU_IC_BASE_OFFS + 0x150)
#define CPU_IC_SOFTINT_H2A_SHFT			0x0

#define WRAPPER_INTR_STATUS			(WRAPPER_BASE_OFFS + 0x0C)
#define WRAPPER_INTR_STATUS_A2HWD_BMSK		BIT(3)
#define WRAPPER_INTR_STATUS_A2H_BMSK		BIT(2)

#define WRAPPER_INTR_MASK			(WRAPPER_BASE_OFFS + 0x10)
#define WRAPPER_INTR_MASK_A2HWD_BMSK		BIT(3)
#define WRAPPER_INTR_MASK_A2HCPU_BMSK		BIT(2)

#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(WRAPPER_BASE_OFFS + 0x54)
#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS		(WRAPPER_BASE_OFFS + 0x58)
#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL	(WRAPPER_BASE_OFFS + 0x5C)
#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS		(WRAPPER_BASE_OFFS + 0x60)

#define WRAPPER_TZ_CPU_STATUS			(WRAPPER_TZ_BASE_OFFS + 0x10)
#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG		(WRAPPER_TZ_BASE_OFFS + 0x14)
#define CTL_AXI_CLK_HALT			BIT(0)
#define CTL_CLK_HALT				BIT(1)

#define WRAPPER_TZ_QNS4PDXFIFO_RESET		(WRAPPER_TZ_BASE_OFFS + 0x18)
#define RESET_HIGH				BIT(0)

#define AON_WRAPPER_MVP_NOC_LPI_CONTROL		(AON_BASE_OFFS)
#define REQ_POWER_DOWN_PREP			BIT(0)

#define AON_WRAPPER_MVP_NOC_LPI_STATUS		(AON_BASE_OFFS + 0x4)

static void iris_vpu_interrupt_init(struct iris_core *core)
{
	u32 mask_val;

	mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
	mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
		      WRAPPER_INTR_MASK_A2HCPU_BMSK);
	writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
}

static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
	const struct vpu_ops *vpu_ops = core->iris_platform_data->vpu_ops;
	u32 queue_size, value;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
			   (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + UC_REGION_ADDR);

	/* Iris hardware requires 1M queue alignment */
	value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
	writel(value, core->reg_base + UC_REGION_SIZE);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + QTBL_ADDR);

	writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);
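
	/*
	 * If an SFR (firmware error reporting) buffer was allocated, share
	 * its device address with the firmware.
	 */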
	if (core->sfr_daddr) {
		value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
		writel(value, core->reg_base + SFR_ADDR);
	}

	if (vpu_ops->program_bootup_registers)
		vpu_ops->program_bootup_registers(core);
}

int iris_vpu_boot_firmware(struct iris_core *core)
{
	u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;

	iris_vpu_setup_ucregion_memory_map(core);

	writel(ctrl_init, core->reg_base + CTRL_INIT);
	writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);

	while (!ctrl_status && count < max_tries) {
		ctrl_status = readl(core->reg_base + CTRL_STATUS);
		if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
			dev_err(core->dev, "invalid setting for uc_region\n");
			break;
		}

		usleep_range(50, 100);
		count++;
	}

	if (count >= max_tries) {
		dev_err(core->dev, "error booting up iris firmware\n");
		return -ETIME;
	}

	writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
	writel(0x0, core->reg_base + CPU_CS_X2RPMH);

	return 0;
}

void iris_vpu_raise_interrupt(struct iris_core *core)
{
	writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
}

void iris_vpu_clear_interrupt(struct iris_core *core)
{
	u32 intr_status, mask;

	intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
	mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
		WRAPPER_INTR_STATUS_A2HWD_BMSK |
		CTRL_INIT_IDLE_MSG_BMSK);

	if (intr_status & mask)
		core->intr_status |= intr_status;

	writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
}

int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
{
	if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
		dev_err(core->dev, "received watchdog interrupt\n");
		return -ETIME;
	}

	return 0;
}

int iris_vpu_prepare_pc(struct iris_core *core)
{
	u32 wfi_status, idle_status, pc_ready;
	u32 ctrl_status, val = 0;
	int ret;

	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
	idle_status = ctrl_status & BIT(30);
	if (pc_ready)
		return 0;

	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	if (!wfi_status || !idle_status)
		goto skip_power_off;

	ret = core->hfi_ops->sys_pc_prep(core);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
				 val & CTRL_STATUS_PC_READY, 250, 2500);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
				 val, val & BIT(0), 250, 2500);
	if (ret)
		goto skip_power_off;

	return 0;

skip_power_off:
	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	dev_err(core->dev, "skip power collapse, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x\n",
		wfi_status, idle_status, pc_ready, ctrl_status);

	return -EAGAIN;
}

int iris_vpu_power_off_controller(struct iris_core *core)
{
	u32 val = 0;
	int ret;

	writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);

	if (!core->iris_platform_data->no_aon) {
		writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);

		ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
					 val, val & BIT(0), 200, 2000);
		if (ret)
			goto disable_power;
	}
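
	/*
	 * Request low-power state for the Iris CPU NOC and wait for the
	 * handshake to complete before shutting down the controller.
	 */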
	writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
				 val, val == 0, 200, 2000);
	if (ret)
		goto disable_power;

	writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
	       core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
	writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);

disable_power:
	iris_disable_unprepare_clock(core, IRIS_AHB_CLK);
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return 0;
}

void iris_vpu_power_off_hw(struct iris_core *core)
{
	dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	iris_disable_unprepare_clock(core, IRIS_HW_AHB_CLK);
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
}

void iris_vpu_power_off(struct iris_core *core)
{
	dev_pm_opp_set_rate(core->dev, 0);
	core->iris_platform_data->vpu_ops->power_off_hw(core);
	core->iris_platform_data->vpu_ops->power_off_controller(core);
	iris_unset_icc_bw(core);

	if (!iris_vpu_watchdog(core, core->intr_status))
		disable_irq_nosync(core->irq);
}

int iris_vpu_power_on_controller(struct iris_core *core)
{
	u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
	if (ret)
		goto err_disable_axi_clock;

	ret = iris_prepare_enable_clock(core, IRIS_AHB_CLK);
	if (ret && ret != -ENOENT)
		goto err_disable_ctrl_clock;

	return 0;

err_disable_ctrl_clock:
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
err_disable_axi_clock:
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return ret;
}

int iris_vpu_power_on_hw(struct iris_core *core)
{
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_HW_AHB_CLK);
	if (ret && ret != -ENOENT)
		goto err_disable_hw_clock;

	ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
	if (ret)
		goto err_disable_hw_ahb_clock;

	return 0;

err_disable_hw_ahb_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_AHB_CLK);
err_disable_hw_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);

	return ret;
}
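
/*
 * Full power-up sequence: vote interconnect bandwidth, power on the
 * controller and hardware, restore the video clock rate, program the
 * preset registers and re-arm the wrapper interrupts before enabling
 * the host IRQ.
 */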
int iris_vpu_power_on(struct iris_core *core)
{
	u32 freq;
	int ret;

	ret = iris_set_icc_bw(core, INT_MAX);
	if (ret)
		goto err;

	ret = core->iris_platform_data->vpu_ops->power_on_controller(core);
	if (ret)
		goto err_unvote_icc;

	ret = core->iris_platform_data->vpu_ops->power_on_hw(core);
	if (ret)
		goto err_power_off_ctrl;

	freq = core->power.clk_freq ? core->power.clk_freq :
				      (u32)ULONG_MAX;

	dev_pm_opp_set_rate(core->dev, freq);

	core->iris_platform_data->set_preset_registers(core);

	iris_vpu_interrupt_init(core);
	core->intr_status = 0;
	enable_irq(core->irq);

	return 0;

err_power_off_ctrl:
	core->iris_platform_data->vpu_ops->power_off_controller(core);
err_unvote_icc:
	iris_unset_icc_bw(core);
err:
	dev_err(core->dev, "power on failed\n");

	return ret;
}