/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU. (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".) A lower policy number has lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT	4

#define CCU_ACCESS_PASSWORD	0xA5A500
#define CLK_GATE_DELAY_LOOP	2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(!div_value);
	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
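/*
 * Worked example for the helpers above (the values are hypothetical,
 * chosen only for illustration):
 *
 *	bitfield_mask(4, 3)               = 0x70
 *	bitfield_extract(0x1234, 4, 3)    = (0x1234 & 0x70) >> 4 = 3
 *	bitfield_replace(0x1234, 4, 3, 5) = 0x1204 | (5 << 4) = 0x1254
 *
 * For a divider with frac_width = 5, a register field value of 9
 * represents the scaled divisor 9 + (1 << 5) = 41, i.e. an effective
 * divide ratio of 41 / 32 = 1.28125.
 */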
/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}
static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}
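/*
 * Illustration of the access protocol above (the numbers follow
 * directly from the definitions; no particular hardware is assumed):
 * unlocking a CCU writes CCU_ACCESS_PASSWORD | 1 = 0xA5A501 to the
 * WR_ACCESS register at offset 0, and re-locking writes 0xA5A500,
 * clearing only the low "enable" bit. A failed __ccu_wait_bit()
 * gives up after CLK_GATE_DELAY_LOOP (2000) one-microsecond delays,
 * i.e. roughly 2 ms plus register read time.
 */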
/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning. To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched. We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
			__func__, ccu->name);

	return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure we're not busy before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect. Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not. The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}
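/*
 * Example of the policy mask layout policy_init() relies on (the
 * offset and bit number here are hypothetical): a clock with
 * policy->offset = 0x10 and policy->bit = 6 has its enable bit at
 * mask 0x40 in each of the four consecutive policy mask registers
 * 0x10, 0x14, 0x18 and 0x1c, one per policy 0-3; policy_init()
 * read-modify-writes that bit into all four.
 */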
/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
			!gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}

/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}
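/*
 * Bit-level sketch of a __gate_commit() call (the register layout
 * here is hypothetical, not taken from a real CCU): for a gate with
 * hw_sw_sel_bit = 1, en_bit = 0 and status_bit = 8 that is software
 * managed and enabled, the function sets bit 1 (software in control)
 * and bit 0 (enable) in the gate register, writes it back, and then
 * polls until status bit 8 reads as 1.
 */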
458 */ 459 static bool 460 __clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) 461 { 462 bool ret; 463 464 if (!gate_exists(gate) || !gate_is_sw_managed(gate)) 465 return true; /* Nothing to do */ 466 467 if (!enable && gate_is_no_disable(gate)) { 468 pr_warn("%s: invalid gate disable request (ignoring)\n", 469 __func__); 470 return true; 471 } 472 473 if (enable == gate_is_enabled(gate)) 474 return true; /* No change */ 475 476 gate_flip_enabled(gate); 477 ret = __gate_commit(ccu, gate); 478 if (!ret) 479 gate_flip_enabled(gate); /* Revert the change */ 480 481 return ret; 482 } 483 484 /* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */ 485 static int clk_gate(struct ccu_data *ccu, const char *name, 486 struct bcm_clk_gate *gate, bool enable) 487 { 488 unsigned long flags; 489 bool success; 490 491 /* 492 * Avoid taking the lock if we can. We quietly ignore 493 * requests to change state that don't make sense. 494 */ 495 if (!gate_exists(gate) || !gate_is_sw_managed(gate)) 496 return 0; 497 if (!enable && gate_is_no_disable(gate)) 498 return 0; 499 500 flags = ccu_lock(ccu); 501 __ccu_write_enable(ccu); 502 503 success = __clk_gate(ccu, gate, enable); 504 505 __ccu_write_disable(ccu); 506 ccu_unlock(ccu, flags); 507 508 if (success) 509 return 0; 510 511 pr_err("%s: failed to %s gate for %s\n", __func__, 512 enable ? "enable" : "disable", name); 513 514 return -EIO; 515 } 516 517 /* Hysteresis operations */ 518 519 /* 520 * If a clock gate requires a turn-off delay it will have 521 * "hysteresis" register bits defined. The first, if set, enables 522 * the delay; and if enabled, the second bit determines whether the 523 * delay is "low" or "high" (1 means high). For now, if it's 524 * defined for a clock, we set it. 525 */ 526 static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst) 527 { 528 u32 offset; 529 u32 reg_val; 530 u32 mask; 531 532 if (!hyst_exists(hyst)) 533 return true; 534 535 offset = hyst->offset; 536 mask = (u32)1 << hyst->en_bit; 537 mask |= (u32)1 << hyst->val_bit; 538 539 reg_val = __ccu_read(ccu, offset); 540 reg_val |= mask; 541 __ccu_write(ccu, offset, reg_val); 542 543 return true; 544 } 545 546 /* Trigger operations */ 547 548 /* 549 * Caller must ensure CCU lock is held and access is enabled. 550 * Returns true if successful, false otherwise. 551 */ 552 static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig) 553 { 554 /* Trigger the clock and wait for it to finish */ 555 __ccu_write(ccu, trig->offset, 1 << trig->bit); 556 557 return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); 558 } 559 560 /* Divider operations */ 561 562 /* Read a divider value and return the scaled divisor it represents. */ 563 static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) 564 { 565 unsigned long flags; 566 u32 reg_val; 567 u32 reg_div; 568 569 if (divider_is_fixed(div)) 570 return (u64)div->u.fixed; 571 572 flags = ccu_lock(ccu); 573 reg_val = __ccu_read(ccu, div->u.s.offset); 574 ccu_unlock(ccu, flags); 575 576 /* Extract the full divider field from the register value */ 577 reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); 578 579 /* Return the scaled divisor value it represents */ 580 return scaled_div_value(div, reg_div); 581 } 582 583 /* 584 * Convert a divider's scaled divisor value into its recorded form 585 * and commit it into the hardware divider register. 586 * 587 * Returns 0 on success. Returns -EINVAL for invalid arguments. 
/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;	/* Revert the change */

	return ret;
}
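/*
 * End-to-end example of setting a divider (hypothetical numbers):
 * to get an effective divisor of 2.5 on a divider with
 * frac_width = 3, scaled_div_build(div, 2, 500000000) returns
 * (2.5 * 10^9 * 8) / 10^9 = 20. divider_write() stores 20 in
 * div->u.s.scaled_div, and __div_commit() records
 * divider(div, 20) = 20 - (1 << 3) = 12 in the register field
 * before firing the trigger.
 */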
691 */ 692 static unsigned long clk_recalc_rate(struct ccu_data *ccu, 693 struct bcm_clk_div *div, struct bcm_clk_div *pre_div, 694 unsigned long parent_rate) 695 { 696 u64 scaled_parent_rate; 697 u64 scaled_div; 698 u64 result; 699 700 if (!divider_exists(div)) 701 return parent_rate; 702 703 if (parent_rate > (unsigned long)LONG_MAX) 704 return 0; /* actually this would be a caller bug */ 705 706 /* 707 * If there is a pre-divider, divide the scaled parent rate 708 * by the pre-divider value first. In this case--to improve 709 * accuracy--scale the parent rate by *both* the pre-divider 710 * value and the divider before actually computing the 711 * result of the pre-divider. 712 * 713 * If there's only one divider, just scale the parent rate. 714 */ 715 if (pre_div && divider_exists(pre_div)) { 716 u64 scaled_rate; 717 718 scaled_rate = scale_rate(pre_div, parent_rate); 719 scaled_rate = scale_rate(div, scaled_rate); 720 scaled_div = divider_read_scaled(ccu, pre_div); 721 scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate, 722 scaled_div); 723 } else { 724 scaled_parent_rate = scale_rate(div, parent_rate); 725 } 726 727 /* 728 * Get the scaled divisor value, and divide the scaled 729 * parent rate by that to determine this clock's resulting 730 * rate. 731 */ 732 scaled_div = divider_read_scaled(ccu, div); 733 result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div); 734 735 return (unsigned long)result; 736 } 737 738 /* 739 * Compute the output rate produced when a given parent rate is fed 740 * into two dividers. The pre-divider can be NULL, and even if it's 741 * non-null it may be nonexistent. It's also OK for the divider to 742 * be nonexistent, and in that case the pre-divider is also ignored. 743 * 744 * If scaled_div is non-null, it is used to return the scaled divisor 745 * value used by the (downstream) divider to produce that rate. 746 */ 747 static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, 748 struct bcm_clk_div *pre_div, 749 unsigned long rate, unsigned long parent_rate, 750 u64 *scaled_div) 751 { 752 u64 scaled_parent_rate; 753 u64 min_scaled_div; 754 u64 max_scaled_div; 755 u64 best_scaled_div; 756 u64 result; 757 758 BUG_ON(!divider_exists(div)); 759 BUG_ON(!rate); 760 BUG_ON(parent_rate > (u64)LONG_MAX); 761 762 /* 763 * If there is a pre-divider, divide the scaled parent rate 764 * by the pre-divider value first. In this case--to improve 765 * accuracy--scale the parent rate by *both* the pre-divider 766 * value and the divider before actually computing the 767 * result of the pre-divider. 768 * 769 * If there's only one divider, just scale the parent rate. 770 * 771 * For simplicity we treat the pre-divider as fixed (for now). 772 */ 773 if (divider_exists(pre_div)) { 774 u64 scaled_rate; 775 u64 scaled_pre_div; 776 777 scaled_rate = scale_rate(pre_div, parent_rate); 778 scaled_rate = scale_rate(div, scaled_rate); 779 scaled_pre_div = divider_read_scaled(ccu, pre_div); 780 scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate, 781 scaled_pre_div); 782 } else { 783 scaled_parent_rate = scale_rate(div, parent_rate); 784 } 785 786 /* 787 * Compute the best possible divider and ensure it is in 788 * range. A fixed divider can't be changed, so just report 789 * the best we can do. 
790 */ 791 if (!divider_is_fixed(div)) { 792 best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, 793 rate); 794 min_scaled_div = scaled_div_min(div); 795 max_scaled_div = scaled_div_max(div); 796 if (best_scaled_div > max_scaled_div) 797 best_scaled_div = max_scaled_div; 798 else if (best_scaled_div < min_scaled_div) 799 best_scaled_div = min_scaled_div; 800 } else { 801 best_scaled_div = divider_read_scaled(ccu, div); 802 } 803 804 /* OK, figure out the resulting rate */ 805 result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div); 806 807 if (scaled_div) 808 *scaled_div = best_scaled_div; 809 810 return (long)result; 811 } 812 813 /* Common clock parent helpers */ 814 815 /* 816 * For a given parent selector (register field) value, find the 817 * index into a selector's parent_sel array that contains it. 818 * Returns the index, or BAD_CLK_INDEX if it's not found. 819 */ 820 static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel) 821 { 822 u8 i; 823 824 BUG_ON(sel->parent_count > (u32)U8_MAX); 825 for (i = 0; i < sel->parent_count; i++) 826 if (sel->parent_sel[i] == parent_sel) 827 return i; 828 return BAD_CLK_INDEX; 829 } 830 831 /* 832 * Fetch the current value of the selector, and translate that into 833 * its corresponding index in the parent array we registered with 834 * the clock framework. 835 * 836 * Returns parent array index that corresponds with the value found, 837 * or BAD_CLK_INDEX if the found value is out of range. 838 */ 839 static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) 840 { 841 unsigned long flags; 842 u32 reg_val; 843 u32 parent_sel; 844 u8 index; 845 846 /* If there's no selector, there's only one parent */ 847 if (!selector_exists(sel)) 848 return 0; 849 850 /* Get the value in the selector register */ 851 flags = ccu_lock(ccu); 852 reg_val = __ccu_read(ccu, sel->offset); 853 ccu_unlock(ccu, flags); 854 855 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); 856 857 /* Look up that selector's parent array index and return it */ 858 index = parent_index(sel, parent_sel); 859 if (index == BAD_CLK_INDEX) 860 pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", 861 __func__, parent_sel, ccu->name, sel->offset); 862 863 return index; 864 } 865 866 /* 867 * Commit our desired selector value to the hardware. 868 * 869 * Returns 0 on success. Returns -EINVAL for invalid arguments. 870 * Returns -ENXIO if gating failed, and -EIO if a trigger failed. 871 */ 872 static int 873 __sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, 874 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) 875 { 876 u32 parent_sel; 877 u32 reg_val; 878 bool enabled; 879 int ret = 0; 880 881 BUG_ON(!selector_exists(sel)); 882 883 /* 884 * If we're just initializing the selector, and no initial 885 * state was defined in the device tree, we just find out 886 * what its current value is rather than updating it. 
887 */ 888 if (sel->clk_index == BAD_CLK_INDEX) { 889 u8 index; 890 891 reg_val = __ccu_read(ccu, sel->offset); 892 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); 893 index = parent_index(sel, parent_sel); 894 if (index == BAD_CLK_INDEX) 895 return -EINVAL; 896 sel->clk_index = index; 897 898 return 0; 899 } 900 901 BUG_ON((u32)sel->clk_index >= sel->parent_count); 902 parent_sel = sel->parent_sel[sel->clk_index]; 903 904 /* Clock needs to be enabled before changing the parent */ 905 enabled = __is_clk_gate_enabled(ccu, gate); 906 if (!enabled && !__clk_gate(ccu, gate, true)) 907 return -ENXIO; 908 909 /* Replace the selector value and record the result */ 910 reg_val = __ccu_read(ccu, sel->offset); 911 reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel); 912 __ccu_write(ccu, sel->offset, reg_val); 913 914 /* If the trigger fails we still want to disable the gate */ 915 if (!__clk_trigger(ccu, trig)) 916 ret = -EIO; 917 918 /* Disable the clock again if it was disabled to begin with */ 919 if (!enabled && !__clk_gate(ccu, gate, false)) 920 ret = ret ? ret : -ENXIO; /* return first error */ 921 922 return ret; 923 } 924 925 /* 926 * Initialize a selector by committing our desired state to hardware 927 * without the usual checks to see if it's already set up that way. 928 * Returns true if successful, false otherwise. 929 */ 930 static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, 931 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) 932 { 933 if (!selector_exists(sel)) 934 return true; 935 return !__sel_commit(ccu, gate, sel, trig); 936 } 937 938 /* 939 * Write a new value into a selector register to switch to a 940 * different parent clock. Returns 0 on success, or an error code 941 * (from __sel_commit()) otherwise. 942 */ 943 static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, 944 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig, 945 u8 index) 946 { 947 unsigned long flags; 948 u8 previous; 949 int ret; 950 951 previous = sel->clk_index; 952 if (previous == index) 953 return 0; /* No change */ 954 955 sel->clk_index = index; 956 957 flags = ccu_lock(ccu); 958 __ccu_write_enable(ccu); 959 960 ret = __sel_commit(ccu, gate, sel, trig); 961 962 __ccu_write_disable(ccu); 963 ccu_unlock(ccu, flags); 964 965 if (ret) 966 sel->clk_index = previous; /* Revert the change */ 967 968 return ret; 969 } 970 971 /* Clock operations */ 972 973 static int kona_peri_clk_enable(struct clk_hw *hw) 974 { 975 struct kona_clk *bcm_clk = to_kona_clk(hw); 976 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 977 978 return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true); 979 } 980 981 static void kona_peri_clk_disable(struct clk_hw *hw) 982 { 983 struct kona_clk *bcm_clk = to_kona_clk(hw); 984 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 985 986 (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false); 987 } 988 989 static int kona_peri_clk_is_enabled(struct clk_hw *hw) 990 { 991 struct kona_clk *bcm_clk = to_kona_clk(hw); 992 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 993 994 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 
/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return clk_hw_get_rate(hw);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static int kona_peri_clk_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk_hw *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	long rate;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2) {
		rate = kona_peri_clk_round_rate(hw, req->rate,
						&req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	/* Unless we can do better, stick with current parent */
	current_parent = clk_hw_get_parent(hw);
	parent_rate = clk_hw_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
	best_delta = abs(best_rate - req->rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = clk_hw_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, req->rate,
							&parent_rate);
		delta = abs(other_rate - req->rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			req->best_parent_hw = parent;
			req->best_parent_rate = parent_rate;
		}
	}

	req->rate = best_rate;
	return 0;
}
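/*
 * Numeric illustration of the parent scan above (hypothetical rates,
 * assuming an integer divider, i.e. frac_width = 0): for a 25 MHz
 * request, a current 104 MHz parent rounds best to 104 / 4 = 26 MHz
 * (delta 1 MHz), while a 100 MHz alternative gives 100 / 4 = 25 MHz
 * (delta 0), so the loop records the 100 MHz parent in
 * req->best_parent_hw and returns 25 MHz in req->rate.
 */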
static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
					       : &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}

static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}
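/*
 * Worked example for the set_rate path below (hypothetical divider):
 * with a 26 MHz parent, a requested 8 MHz rate, and a divider with
 * frac_width = 2, round_rate() computes a best scaled divisor of
 * (26000000 << 2) / 8000000 = 13 (an effective ratio of
 * 13 / 4 = 3.25), and divider_write() then commits
 * 13 - (1 << 2) = 9 to the register field, giving
 * 26 MHz / 3.25 = 8 MHz exactly.
 */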
1166 */ 1167 ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, 1168 &data->trig, scaled_div); 1169 if (ret == -ENXIO) { 1170 pr_err("%s: gating failure for %s\n", __func__, 1171 bcm_clk->init_data.name); 1172 ret = -EIO; /* Don't proliferate weird errors */ 1173 } else if (ret == -EIO) { 1174 pr_err("%s: trigger failed for %s\n", __func__, 1175 bcm_clk->init_data.name); 1176 } 1177 1178 return ret; 1179 } 1180 1181 struct clk_ops kona_peri_clk_ops = { 1182 .enable = kona_peri_clk_enable, 1183 .disable = kona_peri_clk_disable, 1184 .is_enabled = kona_peri_clk_is_enabled, 1185 .recalc_rate = kona_peri_clk_recalc_rate, 1186 .determine_rate = kona_peri_clk_determine_rate, 1187 .set_parent = kona_peri_clk_set_parent, 1188 .get_parent = kona_peri_clk_get_parent, 1189 .set_rate = kona_peri_clk_set_rate, 1190 }; 1191 1192 /* Put a peripheral clock into its initial state */ 1193 static bool __peri_clk_init(struct kona_clk *bcm_clk) 1194 { 1195 struct ccu_data *ccu = bcm_clk->ccu; 1196 struct peri_clk_data *peri = bcm_clk->u.peri; 1197 const char *name = bcm_clk->init_data.name; 1198 struct bcm_clk_trig *trig; 1199 1200 BUG_ON(bcm_clk->type != bcm_clk_peri); 1201 1202 if (!policy_init(ccu, &peri->policy)) { 1203 pr_err("%s: error initializing policy for %s\n", 1204 __func__, name); 1205 return false; 1206 } 1207 if (!gate_init(ccu, &peri->gate)) { 1208 pr_err("%s: error initializing gate for %s\n", __func__, name); 1209 return false; 1210 } 1211 if (!hyst_init(ccu, &peri->hyst)) { 1212 pr_err("%s: error initializing hyst for %s\n", __func__, name); 1213 return false; 1214 } 1215 if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { 1216 pr_err("%s: error initializing divider for %s\n", __func__, 1217 name); 1218 return false; 1219 } 1220 1221 /* 1222 * For the pre-divider and selector, the pre-trigger is used 1223 * if it's present, otherwise we just use the regular trigger. 1224 */ 1225 trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig 1226 : &peri->trig; 1227 1228 if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { 1229 pr_err("%s: error initializing pre-divider for %s\n", __func__, 1230 name); 1231 return false; 1232 } 1233 1234 if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { 1235 pr_err("%s: error initializing selector for %s\n", __func__, 1236 name); 1237 return false; 1238 } 1239 1240 return true; 1241 } 1242 1243 static bool __kona_clk_init(struct kona_clk *bcm_clk) 1244 { 1245 switch (bcm_clk->type) { 1246 case bcm_clk_peri: 1247 return __peri_clk_init(bcm_clk); 1248 default: 1249 BUG(); 1250 } 1251 return false; 1252 } 1253 1254 /* Set a CCU and all its clocks into their desired initial state */ 1255 bool __init kona_ccu_init(struct ccu_data *ccu) 1256 { 1257 unsigned long flags; 1258 unsigned int which; 1259 struct clk **clks = ccu->clk_data.clks; 1260 struct kona_clk *kona_clks = ccu->kona_clks; 1261 bool success = true; 1262 1263 flags = ccu_lock(ccu); 1264 __ccu_write_enable(ccu); 1265 1266 for (which = 0; which < ccu->clk_data.clk_num; which++) { 1267 struct kona_clk *bcm_clk; 1268 1269 if (!clks[which]) 1270 continue; 1271 bcm_clk = &kona_clks[which]; 1272 success &= __kona_clk_init(bcm_clk); 1273 } 1274 1275 __ccu_write_disable(ccu); 1276 ccu_unlock(ccu, flags); 1277 return success; 1278 } 1279