// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/gcd.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_DIV_LENGTH	8
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/**
 * convert_to_reg_val() - Convert divisor values to hardware values.
 *
 * @f: Frequency table with pure m/n/pre_div parameters.
 */
static void convert_to_reg_val(struct freq_tbl *f)
{
	f->pre_div *= 2;
	f->pre_div -= 1;
}
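
/*
 * Worked example (illustrative values, not from any specific SoC): a "pure"
 * pre_div of 2 is stored in the CFG register as 2 * 2 - 1 = 3; calc_rate()
 * below inverts that encoding with rate = parent * 2 / (hid_div + 1), i.e.
 * parent / 2.
 */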

/**
 * calc_rate() - Calculate rate based on m/n:d values
 *
 * @rate: Parent rate.
 * @m: Multiplier.
 * @n: Divisor.
 * @mode: Use zero to ignore m/n calculation.
 * @hid_div: Pre divisor register value. Pre divisor value
 *           relates to hid_div as pre_div = (hid_div + 1) / 2.
 *
 * Return calculated rate according to formula:
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            pre_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
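
/*
 * Worked example (illustrative values): with rate = 810000000 (810 MHz
 * parent), hid_div = 3 (pre_div = 2), m = 11, n = 57 and mode != 0:
 *
 *   rate = 810000000 * 2 / (3 + 1) = 405000000
 *   rate = 405000000 * 11 / 57     ~ 78157894
 */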

static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static const struct freq_conf *
__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
		       unsigned long req_rate)
{
	unsigned long rate_diff, best_rate_diff = ULONG_MAX;
	const struct freq_conf *conf, *best_conf = NULL;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	unsigned long parent_rate, rate;
	struct clk_hw *p;
	int index, i;

	/* Exit early if only one config is defined */
	if (f->num_confs == 1) {
		best_conf = f->confs;
		goto exit;
	}

	/* Search the provided configs for the one closest to the requested rate */
	for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
		index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
		if (index < 0)
			continue;

		p = clk_hw_get_parent_by_index(hw, index);
		if (!p)
			continue;

		parent_rate = clk_hw_get_rate(p);
		rate = calc_rate(parent_rate, conf->m, conf->n, conf->n, conf->pre_div);

		if (rate == req_rate) {
			best_conf = conf;
			goto exit;
		}

		rate_diff = abs_diff(req_rate, rate);
		if (rate_diff < best_rate_diff) {
			best_rate_diff = rate_diff;
			best_conf = conf;
		}
	}

	/*
	 * Very unlikely. Warn if we couldn't find a correct config
	 * because the parent couldn't be found for any config.
	 */
	if (unlikely(!best_conf)) {
		WARN(1, "%s: can't find a configuration for rate %lu\n",
		     name, req_rate);
		return ERR_PTR(-EINVAL);
	}

exit:
	return best_conf;
}

static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
				       struct clk_rate_request *req)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_conf *conf;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq_multi(f, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (conf->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= conf->pre_div + 1;
		}

		if (conf->n) {
			u64 tmp = rate;

			tmp = tmp * conf->n;
			do_div(tmp, conf->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
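
/*
 * Illustrative sketch (hypothetical table, not from this file): a
 * freq_multi_tbl entry can offer the same target rate derived from several
 * sources, e.g. one conf per candidate parent for a 200 MHz target;
 * __clk_rcg2_select_conf() above then picks the conf whose parent-derived
 * rate lands closest to the requested rate.
 */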

/**
 * clk_rcg2_split_div() - Split a multiplier that fits neither in n nor in pre_div.
 *
 * @multiplier: Multiplier to split between n and pre_div.
 * @pre_div: Pointer to pre divisor value.
 * @n: Pointer to n divisor value.
 * @pre_div_max: Pre divisor maximum value.
 */
static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
				      u16 *n, unsigned int pre_div_max)
{
	*n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
	*pre_div = pre_div_max;
}

static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
			      unsigned int mnd_max, unsigned int pre_div_max)
{
	int i = 2;
	unsigned int pre_div = 1;
	unsigned long rates_gcd, scaled_parent_rate;
	u16 m, n = 1, n_candidate = 1, n_max;

	rates_gcd = gcd(parent_rate, rate);
	m = div64_u64(rate, rates_gcd);
	scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
	while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
		/* We're exceeding the divisor's range, try a lower scale. */
		if (m > 1) {
			m--;
			scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
		} else {
			/* Cannot lower the scale, just set maximum divisor values. */
			f->n = mnd_max + m;
			f->pre_div = pre_div_max;
			f->m = m;
			return;
		}
	}

	n_max = m + mnd_max;

	while (scaled_parent_rate > 1) {
		while (scaled_parent_rate % i == 0) {
			n_candidate *= i;
			if (n_candidate < n_max)
				n = n_candidate;
			else if (pre_div * i < pre_div_max)
				pre_div *= i;
			else
				clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);

			scaled_parent_rate /= i;
		}
		i++;
	}

	f->m = m;
	f->n = n;
	f->pre_div = pre_div > 1 ? pre_div : 0;
}
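
/*
 * Worked example (illustrative): parent_rate = 19.2 MHz, rate = 3.2 MHz.
 * gcd = 3200000, so m = 1 and scaled_parent_rate = 6. Factoring 6 (2 * 3)
 * gives n = 6 while pre_div stays 1 (stored as 0), i.e.
 * rate = 19200000 * 1 / 6 = 3200000.
 */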

static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f_tbl = {}, *f = &f_tbl;
	int mnd_max = BIT(rcg->mnd_width) - 1;
	int hid_max = BIT(rcg->hid_width) - 1;
	struct clk_hw *parent;
	u64 parent_rate;

	parent = clk_hw_get_parent(hw);
	parent_rate = clk_get_rate(parent->clk);
	if (!parent_rate)
		return -EINVAL;

	clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
	convert_to_reg_val(f);
	req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);

	return 0;
}

static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
{
	struct clk_hw *hw = &rcg->clkr.hw;
	int index = qcom_find_src_index(hw, rcg->parent_map, src);

	if (index < 0)
		return index;

	*_cfg &= ~CFG_SRC_SEL_MASK;
	*_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	return 0;
}

static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				    u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	int ret;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
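
/*
 * Register-encoding example (illustrative): for m = 1, n = 6 with an 8-bit
 * MND field, the function above programs M = 1 and N = ~(6 - 1) & 0xff =
 * 0xfa. The D value is clamped to [m, 2 * (n - m)]; here d_val = 6, so
 * not2d_val = ~6 & 0xff = 0xf9.
 */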

static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	int ret;

	ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
	if (ret)
		return ret;

	return 0;
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_multi_tbl *f;
	const struct freq_conf *conf;
	struct freq_tbl f_tbl = {};

	f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	f_tbl.freq = f->freq;
	f_tbl.src = conf->src;
	f_tbl.pre_div = conf->pre_div;
	f_tbl.m = conf->m;
	f_tbl.n = conf->n;

	return clk_rcg2_configure(rcg, &f_tbl);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int mnd_max = BIT(rcg->mnd_width) - 1;
	int hid_max = BIT(rcg->hid_width) - 1;
	struct freq_tbl f_tbl = {}, *f = &f_tbl;
	int ret;

	clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
	convert_to_reg_val(f);
	ret = clk_rcg2_configure_gp(rcg, f);

	return ret;
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
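
/*
 * Readback example (illustrative): with mnd_width = 8, M = 1, N = 0xfa and
 * D = 0xf9 as programmed in the encoding example above, d = ~0xf9 & 0xff = 6,
 * halved to 3, and n = (~0xfa + 1) & 0xff = 6, so the reported duty cycle is
 * 3/6 = 50 %.
 */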
785 */ 786 d = clamp_val(d, 1, mask); 787 788 if ((d / 2) > (n - m)) 789 d = (n - m) * 2; 790 else if ((d / 2) < (m / 2)) 791 d = m; 792 793 not2d = ~d & mask; 794 795 ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask, 796 not2d); 797 if (ret) 798 return ret; 799 800 return update_config(rcg); 801 } 802 803 const struct clk_ops clk_rcg2_ops = { 804 .is_enabled = clk_rcg2_is_enabled, 805 .get_parent = clk_rcg2_get_parent, 806 .set_parent = clk_rcg2_set_parent, 807 .recalc_rate = clk_rcg2_recalc_rate, 808 .determine_rate = clk_rcg2_determine_rate, 809 .set_rate = clk_rcg2_set_rate, 810 .set_rate_and_parent = clk_rcg2_set_rate_and_parent, 811 .get_duty_cycle = clk_rcg2_get_duty_cycle, 812 .set_duty_cycle = clk_rcg2_set_duty_cycle, 813 }; 814 EXPORT_SYMBOL_GPL(clk_rcg2_ops); 815 816 const struct clk_ops clk_rcg2_gp_ops = { 817 .is_enabled = clk_rcg2_is_enabled, 818 .get_parent = clk_rcg2_get_parent, 819 .set_parent = clk_rcg2_set_parent, 820 .recalc_rate = clk_rcg2_recalc_rate, 821 .determine_rate = clk_rcg2_determine_gp_rate, 822 .set_rate = clk_rcg2_set_gp_rate, 823 .get_duty_cycle = clk_rcg2_get_duty_cycle, 824 .set_duty_cycle = clk_rcg2_set_duty_cycle, 825 }; 826 EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops); 827 828 const struct clk_ops clk_rcg2_floor_ops = { 829 .is_enabled = clk_rcg2_is_enabled, 830 .get_parent = clk_rcg2_get_parent, 831 .set_parent = clk_rcg2_set_parent, 832 .recalc_rate = clk_rcg2_recalc_rate, 833 .determine_rate = clk_rcg2_determine_floor_rate, 834 .set_rate = clk_rcg2_set_floor_rate, 835 .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent, 836 .get_duty_cycle = clk_rcg2_get_duty_cycle, 837 .set_duty_cycle = clk_rcg2_set_duty_cycle, 838 }; 839 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops); 840 841 const struct clk_ops clk_rcg2_fm_ops = { 842 .is_enabled = clk_rcg2_is_enabled, 843 .get_parent = clk_rcg2_get_parent, 844 .set_parent = clk_rcg2_set_parent, 845 .recalc_rate = clk_rcg2_recalc_rate, 846 .determine_rate = clk_rcg2_fm_determine_rate, 847 .set_rate = clk_rcg2_fm_set_rate, 848 .set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent, 849 .get_duty_cycle = clk_rcg2_get_duty_cycle, 850 .set_duty_cycle = clk_rcg2_set_duty_cycle, 851 }; 852 EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops); 853 854 const struct clk_ops clk_rcg2_mux_closest_ops = { 855 .determine_rate = __clk_mux_determine_rate_closest, 856 .get_parent = clk_rcg2_get_parent, 857 .set_parent = clk_rcg2_set_parent, 858 }; 859 EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops); 860 861 struct frac_entry { 862 int num; 863 int den; 864 }; 865 866 static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */ 867 { 52, 295 }, /* 119 M */ 868 { 11, 57 }, /* 130.25 M */ 869 { 63, 307 }, /* 138.50 M */ 870 { 11, 50 }, /* 148.50 M */ 871 { 47, 206 }, /* 154 M */ 872 { 31, 100 }, /* 205.25 M */ 873 { 107, 269 }, /* 268.50 M */ 874 { }, 875 }; 876 877 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */ 878 { 31, 211 }, /* 119 M */ 879 { 32, 199 }, /* 130.25 M */ 880 { 63, 307 }, /* 138.50 M */ 881 { 11, 60 }, /* 148.50 M */ 882 { 50, 263 }, /* 154 M */ 883 { 31, 120 }, /* 205.25 M */ 884 { 119, 359 }, /* 268.50 M */ 885 { }, 886 }; 887 888 static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate, 889 unsigned long parent_rate) 890 { 891 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 892 struct freq_tbl f = *rcg->freq_tbl; 893 const struct frac_entry *frac; 894 int delta = 100000; 895 s64 src_rate = parent_rate; 896 s64 request; 897 u32 mask = BIT(rcg->hid_width) - 1; 898 u32 hid_div; 899 

static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
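
/*
 * Worked example (illustrative): for a parent rounded to 1500 MHz and a
 * requested byte clock of 750 MHz:
 *
 *   div = DIV_ROUND_UP(2 * 1500000000, 750000000) - 1 = 3
 *
 * and calc_rate() returns 1500000000 * 2 / (3 + 1) = 750 MHz.
 */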

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};
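
/*
 * Example (illustrative): to get a 148.5 MHz pixel clock with the { 2, 3 }
 * entry, clk_pixel_determine_rate() below asks the parent for
 * 148500000 * 3 / 2 = 222.75 MHz and then reports 2/3 of whatever rate the
 * parent can actually provide.
 */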

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
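
/*
 * Note on the ping-pong scheme below (summary of the in-function comments):
 * p0 is a fixed-rate PLL while p1 and p2 alternate as the active source, so
 * the inactive PLL can be reprogrammed and switched to on the next rate
 * change without glitching the running GPU clock.
 */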

static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function ping-pongs the RCG between PLLs: it needs at least
	 * one fixed PLL and two variable ones to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
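
/*
 * Note: the force-enable/configure/clear sequence above keeps the root clock
 * running while the new configuration is latched; on a disabled RCG the
 * CMD_UPDATE handshake in update_config() may otherwise never complete.
 */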

static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long parent_rate,
				      enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * If the clock is disabled, update the M, N and D registers, cache
	 * the CFG value in parked_cfg and don't hit the update bit of the
	 * CMD register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
					  unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
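
/*
 * Parked-configuration flow (illustrative): a clk_set_rate() call on a
 * disabled shared RCG only updates rcg->parked_cfg; the CFG write and the
 * CMD_UPDATE handshake are deferred to clk_rcg2_shared_enable() above, which
 * restores the cached value before handing the RCG back to its consumers.
 */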
1464 */ 1465 clk_rcg2_set_force_enable(hw); 1466 1467 regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, 1468 rcg->safe_src_index << CFG_SRC_SEL_SHIFT); 1469 1470 update_config(rcg); 1471 1472 clk_rcg2_clear_force_enable(hw); 1473 } 1474 1475 static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw) 1476 { 1477 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1478 1479 /* If the shared rcg is parked use the cached cfg instead */ 1480 if (!clk_hw_is_enabled(hw)) 1481 return __clk_rcg2_get_parent(hw, rcg->parked_cfg); 1482 1483 return clk_rcg2_get_parent(hw); 1484 } 1485 1486 static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index) 1487 { 1488 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1489 1490 /* If the shared rcg is parked only update the cached cfg */ 1491 if (!clk_hw_is_enabled(hw)) { 1492 rcg->parked_cfg &= ~CFG_SRC_SEL_MASK; 1493 rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; 1494 1495 return 0; 1496 } 1497 1498 return clk_rcg2_set_parent(hw, index); 1499 } 1500 1501 static unsigned long 1502 clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) 1503 { 1504 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1505 1506 /* If the shared rcg is parked use the cached cfg instead */ 1507 if (!clk_hw_is_enabled(hw)) 1508 return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg); 1509 1510 return clk_rcg2_recalc_rate(hw, parent_rate); 1511 } 1512 1513 static int clk_rcg2_shared_init(struct clk_hw *hw) 1514 { 1515 /* 1516 * This does a few things: 1517 * 1518 * 1. Sets rcg->parked_cfg to reflect the value at probe so that the 1519 * proper parent is reported from clk_rcg2_shared_get_parent(). 1520 * 1521 * 2. Clears the force enable bit of the RCG because we rely on child 1522 * clks (branches) to turn the RCG on/off with a hardware feedback 1523 * mechanism and only set the force enable bit in the RCG when we 1524 * want to make sure the clk stays on for parent switches or 1525 * parking. 1526 * 1527 * 3. Parks shared RCGs on the safe source at registration because we 1528 * can't be certain that the parent clk will stay on during boot, 1529 * especially if the parent is shared. If this RCG is enabled at 1530 * boot, and the parent is turned off, the RCG will get stuck on. A 1531 * GDSC can wedge if is turned on and the RCG is stuck on because 1532 * the GDSC's controller will hang waiting for the clk status to 1533 * toggle on when it never does. 1534 * 1535 * The safest option here is to "park" the RCG at init so that the clk 1536 * can never get stuck on or off. This ensures the GDSC can't get 1537 * wedged. 
1538 */ 1539 clk_rcg2_shared_disable(hw); 1540 1541 return 0; 1542 } 1543 1544 const struct clk_ops clk_rcg2_shared_ops = { 1545 .init = clk_rcg2_shared_init, 1546 .enable = clk_rcg2_shared_enable, 1547 .disable = clk_rcg2_shared_disable, 1548 .get_parent = clk_rcg2_shared_get_parent, 1549 .set_parent = clk_rcg2_shared_set_parent, 1550 .recalc_rate = clk_rcg2_shared_recalc_rate, 1551 .determine_rate = clk_rcg2_determine_rate, 1552 .set_rate = clk_rcg2_shared_set_rate, 1553 .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, 1554 }; 1555 EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); 1556 1557 const struct clk_ops clk_rcg2_shared_floor_ops = { 1558 .enable = clk_rcg2_shared_enable, 1559 .disable = clk_rcg2_shared_disable, 1560 .get_parent = clk_rcg2_shared_get_parent, 1561 .set_parent = clk_rcg2_shared_set_parent, 1562 .recalc_rate = clk_rcg2_shared_recalc_rate, 1563 .determine_rate = clk_rcg2_determine_floor_rate, 1564 .set_rate = clk_rcg2_shared_set_floor_rate, 1565 .set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent, 1566 }; 1567 EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops); 1568 1569 static int clk_rcg2_shared_no_init_park(struct clk_hw *hw) 1570 { 1571 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1572 1573 /* 1574 * Read the config register so that the parent is properly mapped at 1575 * registration time. 1576 */ 1577 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg); 1578 1579 return 0; 1580 } 1581 1582 /* 1583 * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left 1584 * unchanged at registration time. 1585 */ 1586 const struct clk_ops clk_rcg2_shared_no_init_park_ops = { 1587 .init = clk_rcg2_shared_no_init_park, 1588 .enable = clk_rcg2_shared_enable, 1589 .disable = clk_rcg2_shared_disable, 1590 .get_parent = clk_rcg2_shared_get_parent, 1591 .set_parent = clk_rcg2_shared_set_parent, 1592 .recalc_rate = clk_rcg2_shared_recalc_rate, 1593 .determine_rate = clk_rcg2_determine_rate, 1594 .set_rate = clk_rcg2_shared_set_rate, 1595 .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, 1596 }; 1597 EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops); 1598 1599 /* Common APIs to be used for DFS based RCGR */ 1600 static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, 1601 struct freq_tbl *f) 1602 { 1603 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1604 struct clk_hw *p; 1605 unsigned long prate = 0; 1606 u32 val, mask, cfg, mode, src; 1607 int i, num_parents; 1608 1609 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); 1610 1611 mask = BIT(rcg->hid_width) - 1; 1612 f->pre_div = 1; 1613 if (cfg & mask) 1614 f->pre_div = cfg & mask; 1615 1616 src = cfg & CFG_SRC_SEL_MASK; 1617 src >>= CFG_SRC_SEL_SHIFT; 1618 1619 num_parents = clk_hw_get_num_parents(hw); 1620 for (i = 0; i < num_parents; i++) { 1621 if (src == rcg->parent_map[i].cfg) { 1622 f->src = rcg->parent_map[i].src; 1623 p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); 1624 prate = clk_hw_get_rate(p); 1625 } 1626 } 1627 1628 mode = cfg & CFG_MODE_MASK; 1629 mode >>= CFG_MODE_SHIFT; 1630 if (mode) { 1631 mask = BIT(rcg->mnd_width) - 1; 1632 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l), 1633 &val); 1634 val &= mask; 1635 f->m = val; 1636 1637 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l), 1638 &val); 1639 val = ~val; 1640 val &= mask; 1641 val += f->m; 1642 f->n = val; 1643 } 1644 1645 f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div); 1646 } 1647 1648 static int 

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the rate of the parent, because
	 * we can't do any better at figuring it out when the table hasn't
	 * been populated yet. We only populate the table in determine_rate
	 * because we can't guarantee the parents will be registered with the
	 * framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * The rate changes when the consumer writes a register in its own
	 * I/O region.
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
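
/*
 * Worked example for the DP functions below (illustrative): with a fixed
 * 540 MHz link rate and a requested 148.5 MHz pixel clock,
 * rational_best_approximation() reduces 540000000 / 148500000 to
 * den / num = 40 / 11, so m = num = 11, n = den = 40 and the resulting rate
 * is 540000000 * 11 / 40 = 148.5 MHz.
 */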

static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}

const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);