/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
#include <linux/kernel.h>

/*
 * "Policies" affect the frequencies of bus clocks provided by a
 * CCU. (I believe these policies are named "Deep Sleep", "Economy",
 * "Normal", and "Turbo".) A lower policy number has lower power
 * consumption, and policy 2 is the default.
 */
#define CCU_POLICY_COUNT	4

#define CCU_ACCESS_PASSWORD	0xA5A500
#define CLK_GATE_DELAY_LOOP	2000

/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}

/* Divider and scaling helpers */

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}

/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(!div_value);
	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
}

/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
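/*
 * Worked example of the scaled encoding above (illustrative only,
 * assuming a divider whose frac_width is 4):
 *	scaled_div_build(div, 2, 500000000)	== 2.5 * 16       == 40
 *	divider(div, 40)			== 40 - (1 << 4)  == 24
 *	scaled_div_value(div, 24)		== 24 + (1 << 4)  == 40
 * So register value 24 represents the divisor 2.5.
 */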
/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}

/* CCU access */

/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}
static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}

/*
 * Enable/disable write access to CCU protected registers. The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}
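/*
 * With CLK_GATE_DELAY_LOOP set to 2000 and a one-microsecond delay
 * per iteration, __ccu_wait_bit() gives up after roughly two
 * milliseconds of polling.
 */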
/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning. To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched. We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
			__func__, ccu->name);

	return ret;
}

static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure we're not busy before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine already stopped\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}

/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect. Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not. The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}
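/*
 * For example, a clock whose policy bit is 5 and whose policy
 * offset is 0x10 gets bit 5 set in each of the four mask registers
 * at offsets 0x10, 0x14, 0x18 and 0x1c (one per policy).
 */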
/* Gate operations */

/* Determine whether a clock is gated. CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}

/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
			!gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}
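/*
 * To summarize __gate_commit(): a hardware-managed gate gets
 * hardware selected as the controller and its enable bit cleared;
 * a software-managed gate gets software selected, its enable bit
 * set if (and only if) the gate should and may be enabled, and is
 * then polled until the status bit reflects the requested state.
 */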
/*
 * Initialize a gate. Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}

/*
 * Set a gate to enabled or disabled state. Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state. Returns true if successful, false
 * otherwise. CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}

/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can. We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}

/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined. The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high). For now, if it's
 * defined for a clock, we set it.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
	u32 offset;
	u32 reg_val;
	u32 mask;

	if (!hyst_exists(hyst))
		return true;

	offset = hyst->offset;
	mask = (u32)1 << hyst->en_bit;
	mask |= (u32)1 << hyst->val_bit;

	reg_val = __ccu_read(ccu, offset);
	reg_val |= mask;
	__ccu_write(ccu, offset, reg_val);

	return true;
}

/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}
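/*
 * The trigger bit is written as 1 and then polled until it reads
 * back 0; that is, the hardware is expected to clear it once the
 * divider or selector value written beforehand has taken effect.
 */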
/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}

/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}

/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}

static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;	/* Revert the change */

	return ret;
}
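/*
 * Note that div->u.s.scaled_div caches the hardware state:
 * divider_write() updates the cached value first, commits it, and
 * restores the previous value if the commit fails.
 */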
/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider. The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first. In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
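/*
 * Numerical example of the above (illustrative only, assuming a
 * frac_width of 4 for both dividers): with a 100 MHz parent, a
 * pre-divider of 2.0 (scaled value 32) and a divider of 2.5
 * (scaled value 40):
 *	scaled_rate		= 100000000 << 8   == 25600000000
 *	scaled_parent_rate	= 25600000000 / 32 == 800000000
 *	result			= 800000000 / 40   == 20000000
 * which matches 100 MHz / 2.0 / 2.5 = 20 MHz.
 */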
/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers. The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent. It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
				struct bcm_clk_div *pre_div,
				unsigned long rate, unsigned long parent_rate,
				u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first. In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range. A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}

/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;
	return BAD_CLK_INDEX;
}

/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}
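/*
 * For example, a selector whose parent_sel array is { 0, 2, 3 }
 * maps hardware selector value 2 to parent array index 1, while
 * hardware value 1 is out of range and yields BAD_CLK_INDEX.
 */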
/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success. Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}

/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}

/*
 * Write a new value into a selector register to switch to a
 * different parent clock. Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}

/* Clock operations */
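/*
 * The peripheral clock methods below are thin wrappers: each
 * translates a struct clk_hw callback into operations on the gate,
 * divider and selector descriptors attached to the kona_clk.
 */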
static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}

static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return __clk_get_rate(hw->clk);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}

static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long min_rate,
				unsigned long max_rate,
				unsigned long *best_parent_rate,
				struct clk_hw **best_parent)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk *clk = hw->clk;
	struct clk *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2)
		return kona_peri_clk_round_rate(hw, rate, best_parent_rate);

	/* Unless we can do better, stick with current parent */
	current_parent = clk_get_parent(clk);
	parent_rate = __clk_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
	best_delta = abs(best_rate - rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk *parent = clk_get_parent_by_index(clk, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = __clk_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
		delta = abs(other_rate - rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			*best_parent = __clk_get_hw(parent);
			*best_parent_rate = parent_rate;
		}
	}

	return best_rate;
}
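/*
 * Note that kona_peri_clk_determine_rate() compares the rate error
 * achievable with every available parent; *best_parent and
 * *best_parent_rate are only updated when a parent other than the
 * current one produces a closer match.
 */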
static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
						: &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}
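/*
 * An -ENXIO result from selector_write() (or from divider_write(),
 * below) means the gate could not be manipulated around the change;
 * it is reported and then mapped to -EIO so callers see a single
 * "hardware failed" error code.
 */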
static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}

static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == __clk_get_rate(hw->clk))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed. (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.) Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(&data->div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__,
			bcm_clk->init_data.name);
	}

	return ret;
}

struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.determine_rate = kona_peri_clk_determine_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};

/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->init_data.name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!policy_init(ccu, &peri->policy)) {
		pr_err("%s: error initializing policy for %s\n",
			__func__, name);
		return false;
	}
	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!hyst_init(ccu, &peri->hyst)) {
		pr_err("%s: error initializing hyst for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
						: &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}

static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;	/* not reached; BUG() does not return */
}

/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct clk **clks = ccu->clk_data.clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->clk_data.clk_num; which++) {
		struct kona_clk *bcm_clk;

		if (!clks[which])
			continue;
		bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}