1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/bitops.h> 7 #include <linux/delay.h> 8 #include <linux/err.h> 9 #include <linux/export.h> 10 #include <linux/jiffies.h> 11 #include <linux/kernel.h> 12 #include <linux/ktime.h> 13 #include <linux/pm_domain.h> 14 #include <linux/pm_runtime.h> 15 #include <linux/regmap.h> 16 #include <linux/regulator/consumer.h> 17 #include <linux/reset-controller.h> 18 #include <linux/slab.h> 19 #include "gdsc.h" 20 21 #define PWR_ON_MASK BIT(31) 22 #define EN_REST_WAIT_MASK GENMASK_ULL(23, 20) 23 #define EN_FEW_WAIT_MASK GENMASK_ULL(19, 16) 24 #define CLK_DIS_WAIT_MASK GENMASK_ULL(15, 12) 25 #define SW_OVERRIDE_MASK BIT(2) 26 #define HW_CONTROL_MASK BIT(1) 27 #define SW_COLLAPSE_MASK BIT(0) 28 #define GMEM_CLAMP_IO_MASK BIT(0) 29 #define GMEM_RESET_MASK BIT(4) 30 31 /* CFG_GDSCR */ 32 #define GDSC_POWER_UP_COMPLETE BIT(16) 33 #define GDSC_POWER_DOWN_COMPLETE BIT(15) 34 #define GDSC_RETAIN_FF_ENABLE BIT(11) 35 #define CFG_GDSCR_OFFSET 0x4 36 37 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */ 38 #define EN_REST_WAIT_VAL 0x2 39 #define EN_FEW_WAIT_VAL 0x8 40 #define CLK_DIS_WAIT_VAL 0x2 41 42 /* Transition delay shifts */ 43 #define EN_REST_WAIT_SHIFT 20 44 #define EN_FEW_WAIT_SHIFT 16 45 #define CLK_DIS_WAIT_SHIFT 12 46 47 #define RETAIN_MEM BIT(14) 48 #define RETAIN_PERIPH BIT(13) 49 50 #define TIMEOUT_US 500 51 52 #define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd) 53 54 enum gdsc_status { 55 GDSC_OFF, 56 GDSC_ON 57 }; 58 59 static int gdsc_pm_runtime_get(struct gdsc *sc) 60 { 61 if (!sc->dev) 62 return 0; 63 64 return pm_runtime_resume_and_get(sc->dev); 65 } 66 67 static int gdsc_pm_runtime_put(struct gdsc *sc) 68 { 69 if (!sc->dev) 70 return 0; 71 72 return pm_runtime_put_sync(sc->dev); 73 } 74 75 /* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */ 76 static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status) 77 { 78 unsigned int reg; 79 u32 val; 80 int ret; 81 82 if (sc->flags & POLL_CFG_GDSCR) 83 reg = sc->gdscr + CFG_GDSCR_OFFSET; 84 else if (sc->gds_hw_ctrl) 85 reg = sc->gds_hw_ctrl; 86 else 87 reg = sc->gdscr; 88 89 ret = regmap_read(sc->regmap, reg, &val); 90 if (ret) 91 return ret; 92 93 if (sc->flags & POLL_CFG_GDSCR) { 94 switch (status) { 95 case GDSC_ON: 96 return !!(val & GDSC_POWER_UP_COMPLETE); 97 case GDSC_OFF: 98 return !!(val & GDSC_POWER_DOWN_COMPLETE); 99 } 100 } 101 102 switch (status) { 103 case GDSC_ON: 104 return !!(val & PWR_ON_MASK); 105 case GDSC_OFF: 106 return !(val & PWR_ON_MASK); 107 } 108 109 return -EINVAL; 110 } 111 112 static int gdsc_hwctrl(struct gdsc *sc, bool en) 113 { 114 u32 val = en ? HW_CONTROL_MASK : 0; 115 116 return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val); 117 } 118 119 static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status) 120 { 121 ktime_t start; 122 123 start = ktime_get(); 124 do { 125 if (gdsc_check_status(sc, status)) 126 return 0; 127 } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US); 128 129 if (gdsc_check_status(sc, status)) 130 return 0; 131 132 return -ETIMEDOUT; 133 } 134 135 static int gdsc_update_collapse_bit(struct gdsc *sc, bool val) 136 { 137 u32 reg, mask; 138 int ret; 139 140 if (sc->collapse_mask) { 141 reg = sc->collapse_ctrl; 142 mask = sc->collapse_mask; 143 } else { 144 reg = sc->gdscr; 145 mask = SW_COLLAPSE_MASK; 146 } 147 148 ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0); 149 if (ret) 150 return ret; 151 152 return 0; 153 } 154 155 static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status) 156 { 157 int ret; 158 159 if (status == GDSC_ON && sc->rsupply) { 160 ret = regulator_enable(sc->rsupply); 161 if (ret < 0) 162 return ret; 163 } 164 165 ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF); 166 167 /* If disabling votable gdscs, don't poll on status */ 168 if ((sc->flags & VOTABLE) && status == GDSC_OFF) { 169 /* 170 * Add a short delay here to ensure that an enable 171 * right after it was disabled does not put it in an 172 * unknown state 173 */ 174 udelay(TIMEOUT_US); 175 return 0; 176 } 177 178 if (sc->gds_hw_ctrl) { 179 /* 180 * The gds hw controller asserts/de-asserts the status bit soon 181 * after it receives a power on/off request from a master. 182 * The controller then takes around 8 xo cycles to start its 183 * internal state machine and update the status bit. During 184 * this time, the status bit does not reflect the true status 185 * of the core. 186 * Add a delay of 1 us between writing to the SW_COLLAPSE bit 187 * and polling the status bit. 188 */ 189 udelay(1); 190 } 191 192 ret = gdsc_poll_status(sc, status); 193 WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n"); 194 195 if (!ret && status == GDSC_OFF && sc->rsupply) { 196 ret = regulator_disable(sc->rsupply); 197 if (ret < 0) 198 return ret; 199 } 200 201 return ret; 202 } 203 204 static inline int gdsc_deassert_reset(struct gdsc *sc) 205 { 206 int i; 207 208 for (i = 0; i < sc->reset_count; i++) 209 sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]); 210 return 0; 211 } 212 213 static inline int gdsc_assert_reset(struct gdsc *sc) 214 { 215 int i; 216 217 for (i = 0; i < sc->reset_count; i++) 218 sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]); 219 return 0; 220 } 221 222 static inline void gdsc_force_mem_on(struct gdsc *sc) 223 { 224 int i; 225 u32 mask = RETAIN_MEM; 226 227 if (!(sc->flags & NO_RET_PERIPH)) 228 mask |= RETAIN_PERIPH; 229 230 for (i = 0; i < sc->cxc_count; i++) 231 regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask); 232 } 233 234 static inline void gdsc_clear_mem_on(struct gdsc *sc) 235 { 236 int i; 237 u32 mask = RETAIN_MEM; 238 239 if (!(sc->flags & NO_RET_PERIPH)) 240 mask |= RETAIN_PERIPH; 241 242 for (i = 0; i < sc->cxc_count; i++) 243 regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0); 244 } 245 246 static inline void gdsc_deassert_clamp_io(struct gdsc *sc) 247 { 248 regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, 249 GMEM_CLAMP_IO_MASK, 0); 250 } 251 252 static inline void gdsc_assert_clamp_io(struct gdsc *sc) 253 { 254 regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, 255 GMEM_CLAMP_IO_MASK, 1); 256 } 257 258 static inline void gdsc_assert_reset_aon(struct gdsc *sc) 259 { 260 regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, 261 GMEM_RESET_MASK, 1); 262 udelay(1); 263 regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, 264 GMEM_RESET_MASK, 0); 265 } 266 267 static void gdsc_retain_ff_on(struct gdsc *sc) 268 { 269 u32 mask = GDSC_RETAIN_FF_ENABLE; 270 271 regmap_update_bits(sc->regmap, sc->gdscr, mask, mask); 272 } 273 274 static int _gdsc_enable(struct gdsc *sc) 275 { 276 int ret; 277 278 if (sc->pwrsts == PWRSTS_ON) 279 return gdsc_deassert_reset(sc); 280 281 if (sc->flags & SW_RESET) { 282 gdsc_assert_reset(sc); 283 udelay(1); 284 gdsc_deassert_reset(sc); 285 } 286 287 if (sc->flags & CLAMP_IO) { 288 if (sc->flags & AON_RESET) 289 gdsc_assert_reset_aon(sc); 290 gdsc_deassert_clamp_io(sc); 291 } 292 293 ret = gdsc_toggle_logic(sc, GDSC_ON); 294 if (ret) 295 return ret; 296 297 if (sc->pwrsts & PWRSTS_OFF) 298 gdsc_force_mem_on(sc); 299 300 /* 301 * If clocks to this power domain were already on, they will take an 302 * additional 4 clock cycles to re-enable after the power domain is 303 * enabled. Delay to account for this. A delay is also needed to ensure 304 * clocks are not enabled within 400ns of enabling power to the 305 * memories. 306 */ 307 udelay(1); 308 309 /* Turn on HW trigger mode if supported */ 310 if (sc->flags & HW_CTRL) { 311 ret = gdsc_hwctrl(sc, true); 312 if (ret) 313 return ret; 314 /* 315 * Wait for the GDSC to go through a power down and 316 * up cycle. In case a firmware ends up polling status 317 * bits for the gdsc, it might read an 'on' status before 318 * the GDSC can finish the power cycle. 319 * We wait 1us before returning to ensure the firmware 320 * can't immediately poll the status bits. 321 */ 322 udelay(1); 323 } 324 325 if (sc->flags & RETAIN_FF_ENABLE) 326 gdsc_retain_ff_on(sc); 327 328 return 0; 329 } 330 331 static int gdsc_enable(struct generic_pm_domain *domain) 332 { 333 struct gdsc *sc = domain_to_gdsc(domain); 334 int ret; 335 336 ret = gdsc_pm_runtime_get(sc); 337 if (ret) 338 return ret; 339 340 return _gdsc_enable(sc); 341 } 342 343 static int _gdsc_disable(struct gdsc *sc) 344 { 345 int ret; 346 347 if (sc->pwrsts == PWRSTS_ON) 348 return gdsc_assert_reset(sc); 349 350 /* Turn off HW trigger mode if supported */ 351 if (sc->flags & HW_CTRL) { 352 ret = gdsc_hwctrl(sc, false); 353 if (ret < 0) 354 return ret; 355 /* 356 * Wait for the GDSC to go through a power down and 357 * up cycle. In case we end up polling status 358 * bits for the gdsc before the power cycle is completed 359 * it might read an 'on' status wrongly. 360 */ 361 udelay(1); 362 363 ret = gdsc_poll_status(sc, GDSC_ON); 364 if (ret) 365 return ret; 366 } 367 368 if (sc->pwrsts & PWRSTS_OFF) 369 gdsc_clear_mem_on(sc); 370 371 /* 372 * If the GDSC supports only a Retention state, apart from ON, 373 * leave it in ON state. 374 * There is no SW control to transition the GDSC into 375 * Retention state. This happens in HW when the parent 376 * domain goes down to a Low power state 377 */ 378 if (sc->pwrsts == PWRSTS_RET_ON) 379 return 0; 380 381 ret = gdsc_toggle_logic(sc, GDSC_OFF); 382 if (ret) 383 return ret; 384 385 if (sc->flags & CLAMP_IO) 386 gdsc_assert_clamp_io(sc); 387 388 return 0; 389 } 390 391 static int gdsc_disable(struct generic_pm_domain *domain) 392 { 393 struct gdsc *sc = domain_to_gdsc(domain); 394 int ret; 395 396 ret = _gdsc_disable(sc); 397 398 gdsc_pm_runtime_put(sc); 399 400 return ret; 401 } 402 403 static int gdsc_init(struct gdsc *sc) 404 { 405 u32 mask, val; 406 int on, ret; 407 408 /* 409 * Disable HW trigger: collapse/restore occur based on registers writes. 410 * Disable SW override: Use hardware state-machine for sequencing. 411 * Configure wait time between states. 412 */ 413 mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | 414 EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; 415 416 if (!sc->en_rest_wait_val) 417 sc->en_rest_wait_val = EN_REST_WAIT_VAL; 418 if (!sc->en_few_wait_val) 419 sc->en_few_wait_val = EN_FEW_WAIT_VAL; 420 if (!sc->clk_dis_wait_val) 421 sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; 422 423 val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | 424 sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | 425 sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; 426 427 ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); 428 if (ret) 429 return ret; 430 431 /* Force gdsc ON if only ON state is supported */ 432 if (sc->pwrsts == PWRSTS_ON) { 433 ret = gdsc_toggle_logic(sc, GDSC_ON); 434 if (ret) 435 return ret; 436 } 437 438 on = gdsc_check_status(sc, GDSC_ON); 439 if (on < 0) 440 return on; 441 442 if (on) { 443 /* The regulator must be on, sync the kernel state */ 444 if (sc->rsupply) { 445 ret = regulator_enable(sc->rsupply); 446 if (ret < 0) 447 return ret; 448 } 449 450 /* ...and the power-domain */ 451 ret = gdsc_pm_runtime_get(sc); 452 if (ret) 453 goto err_disable_supply; 454 455 /* 456 * Votable GDSCs can be ON due to Vote from other masters. 457 * If a Votable GDSC is ON, make sure we have a Vote. 458 */ 459 if (sc->flags & VOTABLE) { 460 ret = gdsc_update_collapse_bit(sc, false); 461 if (ret) 462 goto err_put_rpm; 463 } 464 465 /* Turn on HW trigger mode if supported */ 466 if (sc->flags & HW_CTRL) { 467 ret = gdsc_hwctrl(sc, true); 468 if (ret < 0) 469 goto err_put_rpm; 470 } 471 472 /* 473 * Make sure the retain bit is set if the GDSC is already on, 474 * otherwise we end up turning off the GDSC and destroying all 475 * the register contents that we thought we were saving. 476 */ 477 if (sc->flags & RETAIN_FF_ENABLE) 478 gdsc_retain_ff_on(sc); 479 } else if (sc->flags & ALWAYS_ON) { 480 /* If ALWAYS_ON GDSCs are not ON, turn them ON */ 481 gdsc_enable(&sc->pd); 482 on = true; 483 } 484 485 if (on || (sc->pwrsts & PWRSTS_RET)) 486 gdsc_force_mem_on(sc); 487 else 488 gdsc_clear_mem_on(sc); 489 490 if (sc->flags & ALWAYS_ON) 491 sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; 492 if (!sc->pd.power_off) 493 sc->pd.power_off = gdsc_disable; 494 if (!sc->pd.power_on) 495 sc->pd.power_on = gdsc_enable; 496 497 ret = pm_genpd_init(&sc->pd, NULL, !on); 498 if (ret) 499 goto err_put_rpm; 500 501 return 0; 502 503 err_put_rpm: 504 if (on) 505 gdsc_pm_runtime_put(sc); 506 err_disable_supply: 507 if (on && sc->rsupply) 508 regulator_disable(sc->rsupply); 509 510 return ret; 511 } 512 513 int gdsc_register(struct gdsc_desc *desc, 514 struct reset_controller_dev *rcdev, struct regmap *regmap) 515 { 516 int i, ret; 517 struct genpd_onecell_data *data; 518 struct device *dev = desc->dev; 519 struct gdsc **scs = desc->scs; 520 size_t num = desc->num; 521 522 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 523 if (!data) 524 return -ENOMEM; 525 526 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains), 527 GFP_KERNEL); 528 if (!data->domains) 529 return -ENOMEM; 530 531 for (i = 0; i < num; i++) { 532 if (!scs[i] || !scs[i]->supply) 533 continue; 534 535 scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply); 536 if (IS_ERR(scs[i]->rsupply)) 537 return PTR_ERR(scs[i]->rsupply); 538 } 539 540 data->num_domains = num; 541 for (i = 0; i < num; i++) { 542 if (!scs[i]) 543 continue; 544 if (pm_runtime_enabled(dev)) 545 scs[i]->dev = dev; 546 scs[i]->regmap = regmap; 547 scs[i]->rcdev = rcdev; 548 ret = gdsc_init(scs[i]); 549 if (ret) 550 return ret; 551 data->domains[i] = &scs[i]->pd; 552 } 553 554 /* Add subdomains */ 555 for (i = 0; i < num; i++) { 556 if (!scs[i]) 557 continue; 558 if (scs[i]->parent) 559 pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); 560 else if (!IS_ERR_OR_NULL(dev->pm_domain)) 561 pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); 562 } 563 564 return of_genpd_add_provider_onecell(dev->of_node, data); 565 } 566 567 void gdsc_unregister(struct gdsc_desc *desc) 568 { 569 int i; 570 struct device *dev = desc->dev; 571 struct gdsc **scs = desc->scs; 572 size_t num = desc->num; 573 574 /* Remove subdomains */ 575 for (i = 0; i < num; i++) { 576 if (!scs[i]) 577 continue; 578 if (scs[i]->parent) 579 pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd); 580 else if (!IS_ERR_OR_NULL(dev->pm_domain)) 581 pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); 582 } 583 of_genpd_del_provider(dev->of_node); 584 } 585 586 /* 587 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU 588 * running in the CX domain so the CPU doesn't need to know anything about the 589 * GX domain EXCEPT.... 590 * 591 * Hardware constraints dictate that the GX be powered down before the CX. If 592 * the GMU crashes it could leave the GX on. In order to successfully bring back 593 * the device the CPU needs to disable the GX headswitch. There being no sane 594 * way to reach in and touch that register from deep inside the GPU driver we 595 * need to set up the infrastructure to be able to ensure that the GPU can 596 * ensure that the GX is off during this super special case. We do this by 597 * defining a GX gdsc with a dummy enable function and a "default" disable 598 * function. 599 * 600 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU 601 * driver. During power up, nothing will happen from the CPU (and the GMU will 602 * power up normally but during power down this will ensure that the GX domain 603 * is *really* off - this gives us a semi standard way of doing what we need. 604 */ 605 int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain) 606 { 607 /* Do nothing but give genpd the impression that we were successful */ 608 return 0; 609 } 610 EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable); 611