// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

/* CMD register: kicks off and reports configuration updates */
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

/* CFG register: source select, half-integer divider and MND mode fields */
#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

/* M/N/D counter registers for fractional (MND) rate generation */
#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

/* Register offsets relative to the RCG's cmd_rcgr base, plus cfg_off */
#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

/* Whether a requested rate is rounded down (FLOOR) or up (CEIL) to a table entry */
enum freq_policy {
	FLOOR,
	CEIL,
};

/*
 * The RCG root is considered enabled unless the hardware reports
 * CMD_ROOT_OFF in the CMD register.
 */
static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

/*
 * Read the currently selected source from the CFG source-select field and
 * translate it to a parent index via rcg->parent_map.  If the register
 * value matches no known parent (or the read fails), fall back to index 0.
 */
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

/*
 * Latch a new CFG/M/N/D configuration into the RCG by setting CMD_UPDATE
 * and polling until the hardware clears the bit (up to ~500us).  Returns
 * -EBUSY (with a WARN) if the update never completes.
 */
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

/* Program the CFG source-select field for @index, then latch via CMD_UPDATE. */
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 *
 * @hid_div is the raw register field; the effective pre-divider is
 * (hid_div + 1) / 2, i.e. the field encodes (2 * divisor - 1).
 * @mode non-zero selects MND (fractional) operation using m/n.
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div) {
		rate *= 2;
		rate /= hid_div + 1;
	}

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}

/*
 * Reconstruct the current output rate from the CFG/M/N registers.
 * The N register holds ~(n - m), hence n = (~N & mask) + m below.
 */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

/*
 * Pick a frequency table entry for @req per @policy and fill in the
 * request.  With CLK_SET_RATE_PARENT the parent rate is back-computed
 * from the entry's pre_div and m/n so the parent can be re-rated;
 * otherwise the parent's current rate is reported unchanged.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* Invert the (2 * div - 1) pre-divider encoding */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* Invert the m/n multiplication */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

/* Round a rate request up to the next frequency table entry. */
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

/* Round a rate request down to the previous frequency table entry. */
static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

/*
 * Write the CFG/M/N/D registers for frequency table entry @f without
 * latching them (the caller triggers CMD_UPDATE separately).  The M/N/D
 * counters are written in the inverted forms the hardware expects:
 * M = m, N = ~(n - m), D = ~n.  Dual-edge mode is selected whenever
 * a fractional m/n (m != n) is in use.
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				  mask, cfg);
}

/* Write a frequency table entry to the hardware and latch it. */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}

/* Look up @rate in the frequency table per @policy and program it. */
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

/* Parent selection is implied by the chosen frequency table entry. */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

/* Standard RCG2: rates rounded up to the frequency table. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

/* RCG2 variant that rounds rates down instead of up. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

/* num/den fraction applied to the parent rate by the MND counter */
struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};

/*
 * eDP pixel clock: find a num/den fraction (table chosen by the fixed
 * 810M or 675M parent rate) whose implied source rate matches the
 * parent within +/-100kHz, keep the currently programmed pre-divider,
 * and program m/n accordingly.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Preserve the pre-divider currently in CFG */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

/*
 * Mirror of clk_edp_pixel_set_rate() for rate negotiation: forces the
 * table-defined parent and reports the exact rate the fraction and the
 * current pre-divider would produce.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

/*
 * Byte clock: parent is fixed by the frequency table; round the parent
 * to the requested rate and derive the half-integer divider from it.
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

/* Program the byte clock divider computed from 2 * parent / rate. */
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

/*
 * byte2 variant: like clk_byte_determine_rate() but keeps whatever
 * parent the framework proposed instead of forcing the table parent.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

/*
 * Set the byte2 rate by computing the divider and reusing whichever
 * source is currently selected in the CFG register.
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Keep the source that is currently programmed in hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

/* Candidate num/den fractions tried for display pixel clocks */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ }
};

/*
 * Pixel clock: for each candidate fraction, ask the parent to round to
 * rate * den / num and accept the first match within +/-100kHz.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

/*
 * Program the pixel clock: keep the hardware-selected source and the
 * current pre-divider, and set m/n to the fraction that matches
 * parent_rate within +/-100kHz.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

/*
 * GPU clock: ping-pong between two identical PLLs (parent indices 3 and
 * 4) so a rate change never reprograms the PLL currently in use; index 0
 * is the XO bypass and index 2 (PLL9) is a fixed-rate cap.  Parent index
 * assignments here are hardcoded to this layout — TODO confirm against
 * the GCC driver's parent table when changing platforms.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

/*
 * Force the RCG root on via CMD_ROOT_EN and poll (up to ~500us) until
 * the hardware reports it enabled.
 */
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

/* Drop the CMD_ROOT_EN force-enable set above. */
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
					CMD_ROOT_EN, 0);
}

/* Reconfigure @f while the root is force-enabled, then release it. */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}

/* Shared RCGs park on a safe source while disabled instead of gating. */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

/* Common APIs to be used for DFS based RCGR */

/*
 * Fill one freq_tbl entry @f from the DFS perf-level registers for
 * level @l: decode source, pre-divider and (if MND mode) m/n, then
 * compute the resulting frequency from the parent's current rate.
 * prate stays 0 (freq computes to 0) when no parent matches.
 */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		/* N register holds ~(n - m); recover n */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

/* Build the DFS frequency table by decoding all hardware perf levels. */
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

/*
 * Lazily populate the DFS table on first rate negotiation (parents are
 * guaranteed registered by then), then defer to the normal CEIL lookup.
 */
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
					clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

/*
 * Report the current DFS rate: read the active perf level from the DFSR
 * status field (bits 4:1), then either use the populated table or decode
 * that level's registers directly.
 */
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

/* DFS-managed RCGs: hardware picks the level; software only reads it. */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

/*
 * If the hardware has DFS enabled for this RCG, switch its clk_ops to
 * the DFS variants and mark the rate as uncacheable; otherwise leave
 * the init data untouched.
 */
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

/* Register DFS handling for each RCG in @rcgs; stops at the first error. */
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);

/*
 * DP pixel clock: find the best rational approximation den/num of
 * parent_rate/rate within the MND counter width, keep the current
 * source and pre-divider, and program m = num, n = den (or bypass
 * MND entirely when the ratio is 1:1).
 */
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

/* Let the parent decide the rate; report what it can provide. */
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = *req;
	int ret;

	ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
	if (ret)
		return ret;

	req->best_parent_rate = parent_req.rate;

	return 0;
}

const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);