// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 Collabora Ltd.
 */
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/infracfg.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "mt6735-pm-domains.h"
#include "mt6795-pm-domains.h"
#include "mt6893-pm-domains.h"
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
#include "mt8186-pm-domains.h"
#include "mt8188-pm-domains.h"
#include "mt8192-pm-domains.h"
#include "mt8195-pm-domains.h"
#include "mt8196-pm-domains.h"
#include "mt8365-pm-domains.h"

#define MTK_POLL_DELAY_US		10
#define MTK_POLL_TIMEOUT		USEC_PER_SEC

#define MTK_HWV_POLL_DELAY_US		5
#define MTK_HWV_POLL_TIMEOUT		(300 * USEC_PER_MSEC)

#define MTK_HWV_PREPARE_DELAY_US	1
#define MTK_HWV_PREPARE_TIMEOUT		(3 * USEC_PER_MSEC)

#define PWR_RST_B_BIT			BIT(0)
#define PWR_ISO_BIT			BIT(1)
#define PWR_ON_BIT			BIT(2)
#define PWR_ON_2ND_BIT			BIT(3)
#define PWR_CLK_DIS_BIT			BIT(4)
#define PWR_SRAM_CLKISO_BIT		BIT(5)
#define PWR_SRAM_ISOINT_B_BIT		BIT(6)

#define PWR_RTFF_SAVE			BIT(24)
#define PWR_RTFF_NRESTORE		BIT(25)
#define PWR_RTFF_CLK_DIS		BIT(26)
#define PWR_RTFF_SAVE_FLAG		BIT(27)
#define PWR_RTFF_UFS_CLK_DIS		BIT(28)

#define MTK_SIP_KERNEL_HWCCF_CONTROL	MTK_SIP_SMC_CMD(0x540)

struct scpsys_domain {
	struct generic_pm_domain genpd;
	const struct scpsys_domain_data *data;
	const struct scpsys_hwv_domain_data *hwv_data;
	struct scpsys *scpsys;
	int num_clks;
	struct clk_bulk_data *clks;
	int num_subsys_clks;
	struct clk_bulk_data *subsys_clks;
	struct regulator *supply;
};

struct scpsys {
	struct device *dev;
	struct regmap *base;
	const struct scpsys_soc_data *soc_data;
	u8 bus_prot_index[BUS_PROT_BLOCK_COUNT];
	struct regmap **bus_prot;
	struct genpd_onecell_data pd_data;
	struct generic_pm_domain *domains[];
};

#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)

static bool scpsys_domain_is_on(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;
	u32 mask = pd->data->sta_mask;
	u32 status, status2, mask2;

	mask2 = pd->data->sta2nd_mask ? pd->data->sta2nd_mask : mask;

	regmap_read(scpsys->base, pd->data->pwr_sta_offs, &status);
	status &= mask;

	regmap_read(scpsys->base, pd->data->pwr_sta2nd_offs, &status2);
	status2 &= mask2;

	/* A domain is on when both status bits are set. */
	return status && status2;
}

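/*
 * HW Voter (HWV) completion checks.
 *
 * The HWV exposes SET/CLR trigger registers plus SET_STA/CLR_STA and DONE
 * status registers, all sharing the same per-domain bit (setclr_bit). The
 * two helpers below sample the relevant registers in a single multi-read
 * and decode whether the last enable/disable vote has fully completed.
 */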
static bool scpsys_hwv_domain_is_disable_done(struct scpsys_domain *pd)
{
	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
	u32 regs[2] = { hwv->done, hwv->clr_sta };
	u32 val[2];
	u32 mask = BIT(hwv->setclr_bit);

	regmap_multi_reg_read(pd->scpsys->base, regs, val, 2);

	/* Disable is done when the bit is set in DONE and cleared in CLR_STA */
	return (val[0] & mask) && !(val[1] & mask);
}

static bool scpsys_hwv_domain_is_enable_done(struct scpsys_domain *pd)
{
	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
	u32 regs[3] = { hwv->done, hwv->en, hwv->set_sta };
	u32 val[3];
	u32 mask = BIT(hwv->setclr_bit);

	regmap_multi_reg_read(pd->scpsys->base, regs, val, 3);

	/* Enable is done when the bit is set in DONE and EN, and cleared in SET_STA */
	return (val[0] & mask) && (val[1] & mask) && !(val[2] & mask);
}

static int scpsys_sec_infra_power_on(bool on)
{
	struct arm_smccc_res res;
	unsigned long cmd = on ? 1 : 0;

	arm_smccc_smc(MTK_SIP_KERNEL_HWCCF_CONTROL, cmd, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;
}

static int scpsys_sram_enable(struct scpsys_domain *pd)
{
	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
	struct scpsys *scpsys = pd->scpsys;
	unsigned int tmp;
	int ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = pdn_ack;
	} else {
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = 0;
	}

	/* Wait until SRAM_PDN_ACK reaches the expected value (all ones or all zeros) */
	ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
				       (tmp & pdn_ack) == expected_ack,
				       MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
		udelay(1);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
	}

	return 0;
}

static int scpsys_sram_disable(struct scpsys_domain *pd)
{
	u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
	struct scpsys *scpsys = pd->scpsys;
	unsigned int tmp;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
		udelay(1);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
	}

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = 0;
	} else {
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
		expected_ack = pdn_ack;
	}

	/* Wait until SRAM_PDN_ACK reaches the expected value (all ones or all zeros) */
	return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
					(tmp & pdn_ack) == expected_ack,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}

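/*
 * Bus protection gates the interconnect paths into a power domain so that
 * no transaction is in flight while the MTCMOS state changes: protection
 * is enabled before powering off and released after powering on. Each step
 * polls a status register for the acknowledgment, except for entries
 * flagged with BUS_PROT_IGNORE_CLR_ACK.
 */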
static struct regmap *scpsys_bus_protect_get_regmap(struct scpsys_domain *pd,
						    const struct scpsys_bus_prot_data *bpd)
{
	struct scpsys *scpsys = pd->scpsys;
	unsigned short block_idx = scpsys->bus_prot_index[bpd->bus_prot_block];

	return scpsys->bus_prot[block_idx];
}

static struct regmap *scpsys_bus_protect_get_sta_regmap(struct scpsys_domain *pd,
							const struct scpsys_bus_prot_data *bpd)
{
	struct scpsys *scpsys = pd->scpsys;
	int block_idx = scpsys->bus_prot_index[bpd->bus_prot_sta_block];

	return scpsys->bus_prot[block_idx];
}

static int scpsys_bus_protect_clear(struct scpsys_domain *pd,
				    const struct scpsys_bus_prot_data *bpd)
{
	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
	u32 sta_mask = bpd->bus_prot_sta_mask;
	u32 expected_ack;
	u32 val;

	expected_ack = (bpd->bus_prot_sta_block == BUS_PROT_BLOCK_INFRA_NAO ? sta_mask : 0);

	if (bpd->flags & BUS_PROT_REG_UPDATE)
		regmap_clear_bits(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);
	else
		regmap_write(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);

	if (bpd->flags & BUS_PROT_IGNORE_CLR_ACK)
		return 0;

	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
					val, (val & sta_mask) == expected_ack,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}

static int scpsys_bus_protect_set(struct scpsys_domain *pd,
				  const struct scpsys_bus_prot_data *bpd)
{
	struct regmap *sta_regmap = scpsys_bus_protect_get_sta_regmap(pd, bpd);
	struct regmap *regmap = scpsys_bus_protect_get_regmap(pd, bpd);
	u32 sta_mask = bpd->bus_prot_sta_mask;
	u32 val;

	if (bpd->flags & BUS_PROT_REG_UPDATE)
		regmap_set_bits(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);
	else
		regmap_write(regmap, bpd->bus_prot_set, bpd->bus_prot_set_clr_mask);

	return regmap_read_poll_timeout(sta_regmap, bpd->bus_prot_sta,
					val, (val & sta_mask) == sta_mask,
					MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}

static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
{
	for (int i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
		int ret;

		if (!bpd->bus_prot_set_clr_mask)
			break;

		if (bpd->flags & BUS_PROT_INVERTED)
			ret = scpsys_bus_protect_clear(pd, bpd);
		else
			ret = scpsys_bus_protect_set(pd, bpd);
		if (ret)
			return ret;
	}

	return 0;
}

static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
{
	for (int i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
		const struct scpsys_bus_prot_data *bpd = &pd->data->bp_cfg[i];
		int ret;

		if (!bpd->bus_prot_set_clr_mask)
			continue;

		if (bpd->flags & BUS_PROT_INVERTED)
			ret = scpsys_bus_protect_set(pd, bpd);
		else
			ret = scpsys_bus_protect_clear(pd, bpd);
		if (ret)
			return ret;
	}

	return 0;
}

static int scpsys_regulator_enable(struct regulator *supply)
{
	return supply ? regulator_enable(supply) : 0;
}

static int scpsys_regulator_disable(struct regulator *supply)
{
	return supply ? regulator_disable(supply) : 0;
}

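/*
 * HW Voter (HWV) based power control: instead of driving the MTCMOS
 * directly, the kernel casts a vote through the HWV SET/CLR registers and
 * the hardware arbitrates the final state among all voters. The sequence
 * below therefore only triggers the vote and then waits for the hardware
 * to acknowledge completion.
 */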
static int scpsys_hwv_power_on(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
	struct scpsys *scpsys = pd->scpsys;
	u32 val;
	int ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL)) {
		ret = scpsys_sec_infra_power_on(true);
		if (ret)
			return ret;
	}

	ret = scpsys_regulator_enable(pd->supply);
	if (ret)
		goto err_infra;

	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
	if (ret)
		goto err_reg;

	/* For HWV the subsys clocks refer to the HWV low power subsystem */
	ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
	if (ret)
		goto err_disable_clks;

	/* Make sure the HW Voter is idle and able to accept commands */
	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->done, val,
					      val & BIT(hwv->setclr_bit),
					      MTK_HWV_POLL_DELAY_US,
					      MTK_HWV_POLL_TIMEOUT);
	if (ret) {
		dev_err(scpsys->dev, "Failed to power on: HW Voter busy.\n");
		goto err_disable_subsys_clks;
	}

	/*
	 * Instruct the HWV to power on the MTCMOS (power domain): after that,
	 * the same bit will be unset immediately by the hardware.
	 */
	regmap_write(scpsys->base, hwv->set, BIT(hwv->setclr_bit));

	/*
	 * Wait until the HWV sets the bit again, signalling that its internal
	 * state machine was started and it is now processing the vote command.
	 */
	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->set, val,
					      val & BIT(hwv->setclr_bit),
					      MTK_HWV_PREPARE_DELAY_US,
					      MTK_HWV_PREPARE_TIMEOUT);
	if (ret) {
		dev_err(scpsys->dev, "Failed to power on: HW Voter not starting.\n");
		goto err_disable_subsys_clks;
	}

	/* Wait for ACK, signalling that the MTCMOS was enabled */
	ret = readx_poll_timeout_atomic(scpsys_hwv_domain_is_enable_done, pd, val, val,
					MTK_HWV_POLL_DELAY_US, MTK_HWV_POLL_TIMEOUT);
	if (ret) {
		dev_err(scpsys->dev, "Failed to power on: HW Voter ACK timeout.\n");
		goto err_disable_subsys_clks;
	}

	/* It's done! Disable the HWV low power subsystem clocks */
	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
		scpsys_sec_infra_power_on(false);

	return 0;

err_disable_subsys_clks:
	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
err_disable_clks:
	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
err_reg:
	scpsys_regulator_disable(pd->supply);
err_infra:
	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
		scpsys_sec_infra_power_on(false);
	return ret;
}

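/*
 * Note: HWV domains are registered with GENPD_FLAG_IRQ_SAFE, so the
 * power-on path above and the power-off path below may run in atomic
 * context; this is why only the *_atomic polling helpers are used here.
 */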
static int scpsys_hwv_power_off(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	const struct scpsys_hwv_domain_data *hwv = pd->hwv_data;
	struct scpsys *scpsys = pd->scpsys;
	u32 val;
	int ret;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL)) {
		ret = scpsys_sec_infra_power_on(true);
		if (ret)
			return ret;
	}

	ret = clk_bulk_prepare_enable(pd->num_subsys_clks, pd->subsys_clks);
	if (ret)
		goto err_infra;

	/* Make sure the HW Voter is idle and able to accept commands */
	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->done, val,
					      val & BIT(hwv->setclr_bit),
					      MTK_HWV_POLL_DELAY_US,
					      MTK_HWV_POLL_TIMEOUT);
	if (ret)
		goto err_disable_subsys_clks;

	/*
	 * Instruct the HWV to power off the MTCMOS (power domain): unlike
	 * power-on, the bit will be kept set.
	 */
	regmap_write(scpsys->base, hwv->clr, BIT(hwv->setclr_bit));

	/*
	 * Wait until the HWV clears the bit, signalling that its internal
	 * state machine was started and it is now processing the clear command.
	 */
	ret = regmap_read_poll_timeout_atomic(scpsys->base, hwv->clr, val,
					      !(val & BIT(hwv->setclr_bit)),
					      MTK_HWV_PREPARE_DELAY_US,
					      MTK_HWV_PREPARE_TIMEOUT);
	if (ret)
		goto err_disable_subsys_clks;

	/* Poweroff needs 100us for the HW to stabilize */
	udelay(100);

	/* Wait for ACK, signalling that the MTCMOS was disabled */
	ret = readx_poll_timeout_atomic(scpsys_hwv_domain_is_disable_done, pd, val, val,
					MTK_HWV_POLL_DELAY_US, MTK_HWV_POLL_TIMEOUT);
	if (ret)
		goto err_disable_subsys_clks;

	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);

	scpsys_regulator_disable(pd->supply);

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
		scpsys_sec_infra_power_on(false);

	return 0;

err_disable_subsys_clks:
	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
err_infra:
	if (MTK_SCPD_CAPS(pd, MTK_SCPD_INFRA_PWR_CTL))
		scpsys_sec_infra_power_on(false);
	return ret;
}

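/*
 * Direct MTCMOS control: the canonical power-on handshake is to assert
 * PWR_ON (and PWR_ON_2ND), wait for the power acknowledgment, ungate the
 * domain clock (clear PWR_CLK_DIS), remove isolation (clear PWR_ISO) and
 * finally release the reset (set PWR_RST_B). Domains with RTFF (retention
 * flip-flops) additionally restore the state that was saved on the
 * previous power-off.
 */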
static int scpsys_ctl_pwrseq_on(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;
	bool do_rtff_nrestore, tmp;
	int ret;

	/* subsys power on */
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);

	/* wait until PWR_ACK = 1 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);

	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);

	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		udelay(5);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);

	/*
	 * RTFF HW state may be modified by the secure world or by remote
	 * processors.
	 *
	 * With the only exception of STOR_UFS, which always needs save/restore,
	 * check if this power domain's RTFF is already on before trying to do
	 * the NRESTORE procedure, otherwise the system will lock up.
	 */
	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
	{
		u32 ctl_status;

		regmap_read(scpsys->base, pd->data->ctl_offs, &ctl_status);
		do_rtff_nrestore = ctl_status & PWR_RTFF_SAVE_FLAG;
		break;
	}
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		/* STOR_UFS always needs NRESTORE */
		do_rtff_nrestore = true;
		break;
	default:
		do_rtff_nrestore = false;
		break;
	}

	/* Return early if RTFF NRESTORE shall not be done */
	if (!do_rtff_nrestore)
		return 0;

	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		break;
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		break;
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		break;
	default:
		break;
	}

	return 0;
}

static void scpsys_ctl_pwrseq_off(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;

	switch (pd->data->rtff_type) {
	case SCPSYS_RTFF_TYPE_GENERIC:
	case SCPSYS_RTFF_TYPE_PCIE_PHY:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
		break;
	case SCPSYS_RTFF_TYPE_STOR_UFS:
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
		break;
	default:
		break;
	}

	/* subsys power off */
	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);

	/* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
	if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
		udelay(1);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
}

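/*
 * Modem power sequence: the modem MTCMOS only toggles PWR_RST_B and
 * PWR_ON, without the clock-gating/isolation handshake used by the
 * generic control sequence above.
 */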
static int scpsys_modem_pwrseq_on(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;
	bool tmp;
	int ret;

	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
		regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);

	regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);

	/* wait until PWR_ACK = 1 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

static void scpsys_modem_pwrseq_off(struct scpsys_domain *pd)
{
	struct scpsys *scpsys = pd->scpsys;

	regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);

	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
		regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
}

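/*
 * Full power-on path for direct-control domains: regulator first, then the
 * domain clocks, the MTCMOS handshake, the subsystem clocks, SRAM power-up
 * and finally bus protection release. On platforms with a strict bus
 * protection policy the subsystem clocks can only be enabled once bus
 * protection has been released.
 */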
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	struct scpsys *scpsys = pd->scpsys;
	int ret;

	ret = scpsys_regulator_enable(pd->supply);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(pd->num_clks, pd->clks);
	if (ret)
		goto err_reg;

	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
		regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
				  pd->data->ext_buck_iso_mask);

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
		ret = scpsys_modem_pwrseq_on(pd);
	else
		ret = scpsys_ctl_pwrseq_on(pd);

	if (ret)
		goto err_pwr_ack;

	/*
	 * On some MediaTek platforms (e.g. MT6779) the bus protection policy
	 * is stricter: bus protection must be released before any bus access,
	 * so the subsys clocks can only be enabled afterwards.
	 */
	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
					      pd->subsys_clks);
		if (ret)
			goto err_pwr_ack;
	}

	ret = scpsys_sram_enable(pd);
	if (ret < 0)
		goto err_disable_subsys_clks;

	ret = scpsys_bus_protect_disable(pd);
	if (ret < 0)
		goto err_disable_sram;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION)) {
		ret = clk_bulk_prepare_enable(pd->num_subsys_clks,
					      pd->subsys_clks);
		if (ret)
			goto err_enable_bus_protect;
	}

	return 0;

err_enable_bus_protect:
	scpsys_bus_protect_enable(pd);
err_disable_sram:
	scpsys_sram_disable(pd);
err_disable_subsys_clks:
	if (!MTK_SCPD_CAPS(pd, MTK_SCPD_STRICT_BUS_PROTECTION))
		clk_bulk_disable_unprepare(pd->num_subsys_clks,
					   pd->subsys_clks);
err_pwr_ack:
	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);
err_reg:
	scpsys_regulator_disable(pd->supply);
	return ret;
}

static int scpsys_power_off(struct generic_pm_domain *genpd)
{
	struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
	struct scpsys *scpsys = pd->scpsys;
	bool tmp;
	int ret;

	ret = scpsys_bus_protect_enable(pd);
	if (ret < 0)
		return ret;

	ret = scpsys_sram_disable(pd);
	if (ret < 0)
		return ret;

	if (pd->data->ext_buck_iso_offs && MTK_SCPD_CAPS(pd, MTK_SCPD_EXT_BUCK_ISO))
		regmap_set_bits(scpsys->base, pd->data->ext_buck_iso_offs,
				pd->data->ext_buck_iso_mask);

	clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
		scpsys_modem_pwrseq_off(pd);
	else
		scpsys_ctl_pwrseq_off(pd);

	/* wait until PWR_ACK = 0 */
	ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
				 MTK_POLL_TIMEOUT);
	if (ret < 0)
		return ret;

	clk_bulk_disable_unprepare(pd->num_clks, pd->clks);

	scpsys_regulator_disable(pd->supply);

	return 0;
}

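/*
 * Parse one power-domain child node: look up the per-SoC domain data by
 * the node's "reg" index, grab the optional domain supply and the clocks
 * listed in the node (clock-names containing a '-' are treated as
 * subsystem clocks), then initialize and register the generic power
 * domain.
 */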
static struct generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys,
						       struct device_node *node)
{
	const struct scpsys_domain_data *domain_data;
	const struct scpsys_hwv_domain_data *hwv_domain_data;
	struct scpsys_domain *pd;
	struct property *prop;
	const char *clk_name;
	int i, ret, num_clks;
	struct clk *clk;
	int clk_ind = 0;
	u32 id;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
			node, ret);
		return ERR_PTR(-EINVAL);
	}

	switch (scpsys->soc_data->type) {
	case SCPSYS_MTCMOS_TYPE_DIRECT_CTL:
		if (id >= scpsys->soc_data->num_domains) {
			dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		domain_data = &scpsys->soc_data->domains_data[id];
		hwv_domain_data = NULL;

		if (domain_data->sta_mask == 0) {
			dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		break;
	case SCPSYS_MTCMOS_TYPE_HW_VOTER:
		if (id >= scpsys->soc_data->num_hwv_domains) {
			dev_err(scpsys->dev, "%pOF: invalid HWV domain id %d\n", node, id);
			return ERR_PTR(-EINVAL);
		}

		domain_data = NULL;
		hwv_domain_data = &scpsys->soc_data->hwv_domains_data[id];

		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->data = domain_data;
	pd->hwv_data = hwv_domain_data;
	pd->scpsys = scpsys;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_DOMAIN_SUPPLY)) {
		pd->supply = devm_of_regulator_get_optional(scpsys->dev, node, "domain");
		if (IS_ERR(pd->supply))
			return dev_err_cast_probe(scpsys->dev, pd->supply,
						  "%pOF: failed to get power supply.\n",
						  node);
	}

	num_clks = of_clk_get_parent_count(node);
	if (num_clks > 0) {
		/* Calculate number of subsys_clks */
		of_property_for_each_string(node, "clock-names", prop, clk_name) {
			char *subsys;

			subsys = strchr(clk_name, '-');
			if (subsys)
				pd->num_subsys_clks++;
			else
				pd->num_clks++;
		}

		pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
		if (!pd->clks)
			return ERR_PTR(-ENOMEM);

		pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
					       sizeof(*pd->subsys_clks), GFP_KERNEL);
		if (!pd->subsys_clks)
			return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < pd->num_clks; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err_probe(scpsys->dev, ret,
				      "%pOF: failed to get clk at index %d\n", node, i);
			goto err_put_clocks;
		}

		pd->clks[clk_ind++].clk = clk;
	}

	for (i = 0; i < pd->num_subsys_clks; i++) {
		clk = of_clk_get(node, i + clk_ind);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err_probe(scpsys->dev, ret,
				      "%pOF: failed to get clk at index %d\n", node,
				      i + clk_ind);
			goto err_put_subsys_clocks;
		}

		pd->subsys_clks[i].clk = clk;
	}

	if (scpsys->domains[id]) {
		ret = -EINVAL;
		dev_err(scpsys->dev,
			"power domain with id %d already exists, check your device-tree\n", id);
		goto err_put_subsys_clocks;
	}

	if (pd->data && pd->data->name)
		pd->genpd.name = pd->data->name;
	else if (pd->hwv_data && pd->hwv_data->name)
		pd->genpd.name = pd->hwv_data->name;
	else
		pd->genpd.name = node->name;

	if (scpsys->soc_data->type == SCPSYS_MTCMOS_TYPE_DIRECT_CTL) {
		pd->genpd.power_off = scpsys_power_off;
		pd->genpd.power_on = scpsys_power_on;
	} else {
		pd->genpd.power_off = scpsys_hwv_power_off;
		pd->genpd.power_on = scpsys_hwv_power_on;

		/* HW-Voter code can be invoked in atomic context */
		pd->genpd.flags |= GENPD_FLAG_IRQ_SAFE;
	}

	/*
	 * Initially turn on all domains to make the domains usable
	 * with !CONFIG_PM and to get the hardware in sync with the
	 * software. The unused domains will be switched off during
	 * late_init time.
	 */
	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
		if (scpsys_domain_is_on(pd))
			dev_warn(scpsys->dev,
				 "%pOF: a default-off power domain was found powered on\n",
				 node);
	} else {
		ret = pd->genpd.power_on(&pd->genpd);
		if (ret < 0) {
			dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
			goto err_put_subsys_clocks;
		}

		if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON))
			pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
	}

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_ACTIVE_WAKEUP))
		pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;

	if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
		pm_genpd_init(&pd->genpd, NULL, true);
	else
		pm_genpd_init(&pd->genpd, NULL, false);

	scpsys->domains[id] = &pd->genpd;

	return scpsys->pd_data.domains[id];

err_put_subsys_clocks:
	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
err_put_clocks:
	clk_bulk_put(pd->num_clks, pd->clks);
	return ERR_PTR(ret);
}

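/*
 * Walk the child nodes of @parent and register each of them as a
 * subdomain, recursing so that arbitrarily deep power-domain trees in
 * the device tree are mirrored in the genpd hierarchy.
 */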
static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
{
	struct generic_pm_domain *child_pd, *parent_pd;
	struct device_node *child;
	int ret;

	for_each_child_of_node(parent, child) {
		u32 id;

		ret = of_property_read_u32(parent, "reg", &id);
		if (ret) {
			dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
			goto err_put_node;
		}

		if (!scpsys->pd_data.domains[id]) {
			ret = -EINVAL;
			dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
			goto err_put_node;
		}

		parent_pd = scpsys->pd_data.domains[id];

		child_pd = scpsys_add_one_domain(scpsys, child);
		if (IS_ERR(child_pd)) {
			ret = PTR_ERR(child_pd);
			dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
				      child);
			goto err_put_node;
		}

		/* recursive call to add all subdomains */
		ret = scpsys_add_subdomain(scpsys, child);
		if (ret)
			goto err_put_node;

		ret = pm_genpd_add_subdomain(parent_pd, child_pd);
		if (ret) {
			dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
				child_pd->name, parent_pd->name);
			goto err_put_node;
		} else {
			dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
				child_pd->name);
		}
	}

	return 0;

err_put_node:
	of_node_put(child);
	return ret;
}

static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
	int ret;

	/*
	 * We're in the error cleanup already, so we only complain,
	 * but won't emit another error on top of the original one.
	 */
	ret = pm_genpd_remove(&pd->genpd);
	if (ret < 0)
		dev_err(pd->scpsys->dev,
			"failed to remove domain '%s' : %d - state may be inconsistent\n",
			pd->genpd.name, ret);

	if (scpsys_domain_is_on(pd))
		scpsys_power_off(&pd->genpd);

	clk_bulk_put(pd->num_clks, pd->clks);
	clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
}

static void scpsys_domain_cleanup(struct scpsys *scpsys)
{
	struct generic_pm_domain *genpd;
	struct scpsys_domain *pd;
	int i;

	for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
		genpd = scpsys->pd_data.domains[i];
		if (genpd) {
			pd = to_scpsys_domain(genpd);
			scpsys_remove_one_domain(pd);
		}
	}
}

static struct device_node *scpsys_get_legacy_regmap(struct device_node *np, const char *pn)
{
	struct device_node *local_node;

	for_each_child_of_node(np, local_node) {
		if (of_property_present(local_node, pn))
			return local_node;
	}

	return NULL;
}

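/*
 * For reference, the two device tree layouts handled below, sketched from
 * the lookups this driver performs (node names and phandle targets are
 * illustrative, not taken from a real dts):
 *
 * Legacy style, phandle properties inside the power-domain child nodes:
 *
 *	power-controller {
 *		power-domain@3 {
 *			reg = <3>;
 *			mediatek,infracfg = <&infracfg>;
 *			mediatek,smi = <&smi_common>;
 *		};
 *	};
 *
 * New style, one flat list on the controller node, which must match the
 * SoC's bus_prot_blocks table in order and length:
 *
 *	power-controller {
 *		access-controllers = <&infracfg>, <&smi_common>, <&infracfg_nao>;
 *	};
 */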
static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *scpsys)
{
	const u8 bp_blocks[3] = {
		BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI, BUS_PROT_BLOCK_INFRA_NAO
	};
	struct device_node *np = dev->of_node;
	struct device_node *node, *smi_np;
	int num_regmaps = 0, i, j;
	struct regmap *regmap[3];

	/*
	 * The legacy binding provides at most three bus protection handles,
	 * any of which may be absent, so the array of bp blocks that is
	 * normally passed in as platform data must be built dynamically here.
	 *
	 * Try to retrieve all of the regmaps that the legacy code supported
	 * and count the ones that are present: this makes it possible to
	 * allocate the array of bus_prot regmaps and convert everything to
	 * the new style handling.
	 */
	node = scpsys_get_legacy_regmap(np, "mediatek,infracfg");
	if (node) {
		regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
		of_node_put(node);
		num_regmaps++;
		if (IS_ERR(regmap[0]))
			return dev_err_probe(dev, PTR_ERR(regmap[0]),
					     "%pOF: failed to get infracfg regmap\n",
					     node);
	} else {
		regmap[0] = NULL;
	}

	node = scpsys_get_legacy_regmap(np, "mediatek,smi");
	if (node) {
		smi_np = of_parse_phandle(node, "mediatek,smi", 0);
		of_node_put(node);
		if (!smi_np)
			return -ENODEV;

		regmap[1] = device_node_to_regmap(smi_np);
		of_node_put(smi_np);
		num_regmaps++;
		if (IS_ERR(regmap[1]))
			return dev_err_probe(dev, PTR_ERR(regmap[1]),
					     "%pOF: failed to get SMI regmap\n",
					     node);
	} else {
		regmap[1] = NULL;
	}

	node = scpsys_get_legacy_regmap(np, "mediatek,infracfg-nao");
	if (node) {
		regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
		of_node_put(node);
		num_regmaps++;
		if (IS_ERR(regmap[2]))
			return dev_err_probe(dev, PTR_ERR(regmap[2]),
					     "%pOF: failed to get infracfg-nao regmap\n",
					     node);
	} else {
		regmap[2] = NULL;
	}

	scpsys->bus_prot = devm_kmalloc_array(dev, num_regmaps,
					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
	if (!scpsys->bus_prot)
		return -ENOMEM;

	for (i = 0, j = 0; i < ARRAY_SIZE(bp_blocks); i++) {
		enum scpsys_bus_prot_block bp_type;

		if (!regmap[i])
			continue;

		bp_type = bp_blocks[i];
		scpsys->bus_prot_index[bp_type] = j;
		scpsys->bus_prot[j] = regmap[i];

		j++;
	}

	return 0;
}

static int scpsys_get_bus_protection(struct device *dev, struct scpsys *scpsys)
{
	const struct scpsys_soc_data *soc = scpsys->soc_data;
	struct device_node *np = dev->of_node;
	int i, num_handles;

	num_handles = of_count_phandle_with_args(np, "access-controllers", NULL);
	if (num_handles < 0 || num_handles != soc->num_bus_prot_blocks)
		return dev_err_probe(dev, -EINVAL,
				     "Cannot get access controllers: expected %u, got %d\n",
				     soc->num_bus_prot_blocks, num_handles);

	scpsys->bus_prot = devm_kmalloc_array(dev, soc->num_bus_prot_blocks,
					      sizeof(*scpsys->bus_prot), GFP_KERNEL);
	if (!scpsys->bus_prot)
		return -ENOMEM;

	for (i = 0; i < soc->num_bus_prot_blocks; i++) {
		enum scpsys_bus_prot_block bp_type;
		struct device_node *node;

		node = of_parse_phandle(np, "access-controllers", i);
		if (!node)
			return -EINVAL;

		/*
		 * Index the bus protection regmaps so that we don't have to
		 * find the right one by type with a loop at every execution
		 * of power sequence(s).
		 */
		bp_type = soc->bus_prot_blocks[i];
		scpsys->bus_prot_index[bp_type] = i;

		scpsys->bus_prot[i] = device_node_to_regmap(node);
		of_node_put(node);
		if (IS_ERR_OR_NULL(scpsys->bus_prot[i]))
			return dev_err_probe(dev, scpsys->bus_prot[i] ?
					     PTR_ERR(scpsys->bus_prot[i]) : -ENXIO,
					     "Cannot get regmap for access controller %d\n", i);
	}

	return 0;
}

static const struct of_device_id scpsys_of_match[] = {
	{
		.compatible = "mediatek,mt6735-power-controller",
		.data = &mt6735_scpsys_data,
	},
	{
		.compatible = "mediatek,mt6795-power-controller",
		.data = &mt6795_scpsys_data,
	},
	{
		.compatible = "mediatek,mt6893-power-controller",
		.data = &mt6893_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8167-power-controller",
		.data = &mt8167_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8173-power-controller",
		.data = &mt8173_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8183-power-controller",
		.data = &mt8183_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8186-power-controller",
		.data = &mt8186_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8188-power-controller",
		.data = &mt8188_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8192-power-controller",
		.data = &mt8192_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8195-power-controller",
		.data = &mt8195_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8196-power-controller",
		.data = &mt8196_scpsys_data,
	},
	{
		.compatible = "mediatek,mt8196-hwv-hfrp-power-controller",
		.data = &mt8196_hfrpsys_hwv_data,
	},
	{
		.compatible = "mediatek,mt8196-hwv-scp-power-controller",
		.data = &mt8196_scpsys_hwv_data,
	},
	{
		.compatible = "mediatek,mt8365-power-controller",
		.data = &mt8365_scpsys_data,
	},
	{ }
};

static int scpsys_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct scpsys_soc_data *soc;
	struct device_node *node;
	struct device *parent;
	struct scpsys *scpsys;
	int num_domains, ret;

	soc = of_device_get_match_data(&pdev->dev);
	if (!soc) {
		dev_err(&pdev->dev, "no power controller data\n");
		return -EINVAL;
	}

	num_domains = soc->num_domains + soc->num_hwv_domains;

	scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, num_domains), GFP_KERNEL);
	if (!scpsys)
		return -ENOMEM;

	scpsys->dev = dev;
	scpsys->soc_data = soc;

	scpsys->pd_data.domains = scpsys->domains;
	scpsys->pd_data.num_domains = soc->num_domains;

	parent = dev->parent;
	if (!parent) {
		dev_err(dev, "no parent for syscon devices\n");
		return -ENODEV;
	}

	scpsys->base = syscon_node_to_regmap(parent->of_node);
	if (IS_ERR(scpsys->base)) {
		dev_err(dev, "no regmap available\n");
		return PTR_ERR(scpsys->base);
	}

	if (of_property_present(np, "access-controllers"))
		ret = scpsys_get_bus_protection(dev, scpsys);
	else
		ret = scpsys_get_bus_protection_legacy(dev, scpsys);

	if (ret)
		return ret;

	ret = -ENODEV;
	for_each_available_child_of_node(np, node) {
		struct generic_pm_domain *domain;

		domain = scpsys_add_one_domain(scpsys, node);
		if (IS_ERR(domain)) {
			ret = PTR_ERR(domain);
			of_node_put(node);
			goto err_cleanup_domains;
		}

		ret = scpsys_add_subdomain(scpsys, node);
		if (ret) {
			of_node_put(node);
			goto err_cleanup_domains;
		}
	}

	if (ret) {
		dev_dbg(dev, "no power domains present\n");
		return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
	if (ret) {
		dev_err(dev, "failed to add provider: %d\n", ret);
		goto err_cleanup_domains;
	}

	return 0;

err_cleanup_domains:
	scpsys_domain_cleanup(scpsys);
	return ret;
}

static struct platform_driver scpsys_pm_domain_driver = {
	.probe = scpsys_probe,
	.driver = {
		.name = "mtk-power-controller",
		.suppress_bind_attrs = true,
		.of_match_table = scpsys_of_match,
	},
};
builtin_platform_driver(scpsys_pm_domain_driver);