1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/arch/arm/mach-omap1/clock.c 4 * 5 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation 6 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> 7 * 8 * Modified to use omap shared clock framework by 9 * Tony Lindgren <tony@atomide.com> 10 */ 11 #include <linux/kernel.h> 12 #include <linux/export.h> 13 #include <linux/list.h> 14 #include <linux/errno.h> 15 #include <linux/err.h> 16 #include <linux/io.h> 17 #include <linux/clk.h> 18 #include <linux/clkdev.h> 19 #include <linux/soc/ti/omap1-io.h> 20 21 #include <asm/mach-types.h> 22 23 #include "hardware.h" 24 #include "soc.h" 25 #include "iomap.h" 26 #include "clock.h" 27 #include "opp.h" 28 #include "sram.h" 29 30 __u32 arm_idlect1_mask; 31 struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p; 32 33 static LIST_HEAD(clocks); 34 static DEFINE_MUTEX(clocks_mutex); 35 static DEFINE_SPINLOCK(clockfw_lock); 36 37 /* 38 * Omap1 specific clock functions 39 */ 40 41 unsigned long omap1_uart_recalc(struct clk *clk) 42 { 43 unsigned int val = __raw_readl(clk->enable_reg); 44 return val & 1 << clk->enable_bit ? 48000000 : 12000000; 45 } 46 47 unsigned long omap1_sossi_recalc(struct clk *clk) 48 { 49 u32 div = omap_readl(MOD_CONF_CTRL_1); 50 51 div = (div >> 17) & 0x7; 52 div++; 53 54 return clk->parent->rate / div; 55 } 56 57 static void omap1_clk_allow_idle(struct clk *clk) 58 { 59 struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk; 60 61 if (!(clk->flags & CLOCK_IDLE_CONTROL)) 62 return; 63 64 if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count)) 65 arm_idlect1_mask |= 1 << iclk->idlect_shift; 66 } 67 68 static void omap1_clk_deny_idle(struct clk *clk) 69 { 70 struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk; 71 72 if (!(clk->flags & CLOCK_IDLE_CONTROL)) 73 return; 74 75 if (iclk->no_idle_count++ == 0) 76 arm_idlect1_mask &= ~(1 << iclk->idlect_shift); 77 } 78 79 static __u16 verify_ckctl_value(__u16 newval) 80 { 81 /* This function checks for following limitations set 82 * by the hardware (all conditions must be true): 83 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2 84 * ARM_CK >= TC_CK 85 * DSP_CK >= TC_CK 86 * DSPMMU_CK >= TC_CK 87 * 88 * In addition following rules are enforced: 89 * LCD_CK <= TC_CK 90 * ARMPER_CK <= TC_CK 91 * 92 * However, maximum frequencies are not checked for! 93 */ 94 __u8 per_exp; 95 __u8 lcd_exp; 96 __u8 arm_exp; 97 __u8 dsp_exp; 98 __u8 tc_exp; 99 __u8 dspmmu_exp; 100 101 per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3; 102 lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3; 103 arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3; 104 dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3; 105 tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3; 106 dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3; 107 108 if (dspmmu_exp < dsp_exp) 109 dspmmu_exp = dsp_exp; 110 if (dspmmu_exp > dsp_exp+1) 111 dspmmu_exp = dsp_exp+1; 112 if (tc_exp < arm_exp) 113 tc_exp = arm_exp; 114 if (tc_exp < dspmmu_exp) 115 tc_exp = dspmmu_exp; 116 if (tc_exp > lcd_exp) 117 lcd_exp = tc_exp; 118 if (tc_exp > per_exp) 119 per_exp = tc_exp; 120 121 newval &= 0xf000; 122 newval |= per_exp << CKCTL_PERDIV_OFFSET; 123 newval |= lcd_exp << CKCTL_LCDDIV_OFFSET; 124 newval |= arm_exp << CKCTL_ARMDIV_OFFSET; 125 newval |= dsp_exp << CKCTL_DSPDIV_OFFSET; 126 newval |= tc_exp << CKCTL_TCDIV_OFFSET; 127 newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET; 128 129 return newval; 130 } 131 132 static int calc_dsor_exp(struct clk *clk, unsigned long rate) 133 { 134 /* Note: If target frequency is too low, this function will return 4, 135 * which is invalid value. Caller must check for this value and act 136 * accordingly. 137 * 138 * Note: This function does not check for following limitations set 139 * by the hardware (all conditions must be true): 140 * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2 141 * ARM_CK >= TC_CK 142 * DSP_CK >= TC_CK 143 * DSPMMU_CK >= TC_CK 144 */ 145 unsigned long realrate; 146 struct clk * parent; 147 unsigned dsor_exp; 148 149 parent = clk->parent; 150 if (unlikely(parent == NULL)) 151 return -EIO; 152 153 realrate = parent->rate; 154 for (dsor_exp=0; dsor_exp<4; dsor_exp++) { 155 if (realrate <= rate) 156 break; 157 158 realrate /= 2; 159 } 160 161 return dsor_exp; 162 } 163 164 unsigned long omap1_ckctl_recalc(struct clk *clk) 165 { 166 /* Calculate divisor encoded as 2-bit exponent */ 167 int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset)); 168 169 return clk->parent->rate / dsor; 170 } 171 172 unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk) 173 { 174 int dsor; 175 176 /* Calculate divisor encoded as 2-bit exponent 177 * 178 * The clock control bits are in DSP domain, 179 * so api_ck is needed for access. 180 * Note that DSP_CKCTL virt addr = phys addr, so 181 * we must use __raw_readw() instead of omap_readw(). 182 */ 183 omap1_clk_enable(api_ck_p); 184 dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset)); 185 omap1_clk_disable(api_ck_p); 186 187 return clk->parent->rate / dsor; 188 } 189 190 /* MPU virtual clock functions */ 191 int omap1_select_table_rate(struct clk *clk, unsigned long rate) 192 { 193 /* Find the highest supported frequency <= rate and switch to it */ 194 struct mpu_rate * ptr; 195 unsigned long ref_rate; 196 197 ref_rate = ck_ref_p->rate; 198 199 for (ptr = omap1_rate_table; ptr->rate; ptr++) { 200 if (!(ptr->flags & cpu_mask)) 201 continue; 202 203 if (ptr->xtal != ref_rate) 204 continue; 205 206 /* Can check only after xtal frequency check */ 207 if (ptr->rate <= rate) 208 break; 209 } 210 211 if (!ptr->rate) 212 return -EINVAL; 213 214 /* 215 * In most cases we should not need to reprogram DPLL. 216 * Reprogramming the DPLL is tricky, it must be done from SRAM. 217 */ 218 omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val); 219 220 /* XXX Do we need to recalculate the tree below DPLL1 at this point? */ 221 ck_dpll1_p->rate = ptr->pll_rate; 222 223 return 0; 224 } 225 226 int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate) 227 { 228 int dsor_exp; 229 u16 regval; 230 231 dsor_exp = calc_dsor_exp(clk, rate); 232 if (dsor_exp > 3) 233 dsor_exp = -EINVAL; 234 if (dsor_exp < 0) 235 return dsor_exp; 236 237 regval = __raw_readw(DSP_CKCTL); 238 regval &= ~(3 << clk->rate_offset); 239 regval |= dsor_exp << clk->rate_offset; 240 __raw_writew(regval, DSP_CKCTL); 241 clk->rate = clk->parent->rate / (1 << dsor_exp); 242 243 return 0; 244 } 245 246 long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate) 247 { 248 int dsor_exp = calc_dsor_exp(clk, rate); 249 if (dsor_exp < 0) 250 return dsor_exp; 251 if (dsor_exp > 3) 252 dsor_exp = 3; 253 return clk->parent->rate / (1 << dsor_exp); 254 } 255 256 int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate) 257 { 258 int dsor_exp; 259 u16 regval; 260 261 dsor_exp = calc_dsor_exp(clk, rate); 262 if (dsor_exp > 3) 263 dsor_exp = -EINVAL; 264 if (dsor_exp < 0) 265 return dsor_exp; 266 267 regval = omap_readw(ARM_CKCTL); 268 regval &= ~(3 << clk->rate_offset); 269 regval |= dsor_exp << clk->rate_offset; 270 regval = verify_ckctl_value(regval); 271 omap_writew(regval, ARM_CKCTL); 272 clk->rate = clk->parent->rate / (1 << dsor_exp); 273 return 0; 274 } 275 276 long omap1_round_to_table_rate(struct clk *clk, unsigned long rate) 277 { 278 /* Find the highest supported frequency <= rate */ 279 struct mpu_rate * ptr; 280 long highest_rate; 281 unsigned long ref_rate; 282 283 ref_rate = ck_ref_p->rate; 284 285 highest_rate = -EINVAL; 286 287 for (ptr = omap1_rate_table; ptr->rate; ptr++) { 288 if (!(ptr->flags & cpu_mask)) 289 continue; 290 291 if (ptr->xtal != ref_rate) 292 continue; 293 294 highest_rate = ptr->rate; 295 296 /* Can check only after xtal frequency check */ 297 if (ptr->rate <= rate) 298 break; 299 } 300 301 return highest_rate; 302 } 303 304 static unsigned calc_ext_dsor(unsigned long rate) 305 { 306 unsigned dsor; 307 308 /* MCLK and BCLK divisor selection is not linear: 309 * freq = 96MHz / dsor 310 * 311 * RATIO_SEL range: dsor <-> RATIO_SEL 312 * 0..6: (RATIO_SEL+2) <-> (dsor-2) 313 * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6) 314 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9 315 * can not be used. 316 */ 317 for (dsor = 2; dsor < 96; ++dsor) { 318 if ((dsor & 1) && dsor > 8) 319 continue; 320 if (rate >= 96000000 / dsor) 321 break; 322 } 323 return dsor; 324 } 325 326 /* XXX Only needed on 1510 */ 327 int omap1_set_uart_rate(struct clk *clk, unsigned long rate) 328 { 329 unsigned int val; 330 331 val = __raw_readl(clk->enable_reg); 332 if (rate == 12000000) 333 val &= ~(1 << clk->enable_bit); 334 else if (rate == 48000000) 335 val |= (1 << clk->enable_bit); 336 else 337 return -EINVAL; 338 __raw_writel(val, clk->enable_reg); 339 clk->rate = rate; 340 341 return 0; 342 } 343 344 /* External clock (MCLK & BCLK) functions */ 345 int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate) 346 { 347 unsigned dsor; 348 __u16 ratio_bits; 349 350 dsor = calc_ext_dsor(rate); 351 clk->rate = 96000000 / dsor; 352 if (dsor > 8) 353 ratio_bits = ((dsor - 8) / 2 + 6) << 2; 354 else 355 ratio_bits = (dsor - 2) << 2; 356 357 ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd; 358 __raw_writew(ratio_bits, clk->enable_reg); 359 360 return 0; 361 } 362 363 int omap1_set_sossi_rate(struct clk *clk, unsigned long rate) 364 { 365 u32 l; 366 int div; 367 unsigned long p_rate; 368 369 p_rate = clk->parent->rate; 370 /* Round towards slower frequency */ 371 div = (p_rate + rate - 1) / rate; 372 div--; 373 if (div < 0 || div > 7) 374 return -EINVAL; 375 376 l = omap_readl(MOD_CONF_CTRL_1); 377 l &= ~(7 << 17); 378 l |= div << 17; 379 omap_writel(l, MOD_CONF_CTRL_1); 380 381 clk->rate = p_rate / (div + 1); 382 383 return 0; 384 } 385 386 long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate) 387 { 388 return 96000000 / calc_ext_dsor(rate); 389 } 390 391 void omap1_init_ext_clk(struct clk *clk) 392 { 393 unsigned dsor; 394 __u16 ratio_bits; 395 396 /* Determine current rate and ensure clock is based on 96MHz APLL */ 397 ratio_bits = __raw_readw(clk->enable_reg) & ~1; 398 __raw_writew(ratio_bits, clk->enable_reg); 399 400 ratio_bits = (ratio_bits & 0xfc) >> 2; 401 if (ratio_bits > 6) 402 dsor = (ratio_bits - 6) * 2 + 8; 403 else 404 dsor = ratio_bits + 2; 405 406 clk-> rate = 96000000 / dsor; 407 } 408 409 int omap1_clk_enable(struct clk *clk) 410 { 411 int ret = 0; 412 413 if (clk->usecount++ == 0) { 414 if (clk->parent) { 415 ret = omap1_clk_enable(clk->parent); 416 if (ret) 417 goto err; 418 419 if (clk->flags & CLOCK_NO_IDLE_PARENT) 420 omap1_clk_deny_idle(clk->parent); 421 } 422 423 ret = clk->ops->enable(clk); 424 if (ret) { 425 if (clk->parent) 426 omap1_clk_disable(clk->parent); 427 goto err; 428 } 429 } 430 return ret; 431 432 err: 433 clk->usecount--; 434 return ret; 435 } 436 437 void omap1_clk_disable(struct clk *clk) 438 { 439 if (clk->usecount > 0 && !(--clk->usecount)) { 440 clk->ops->disable(clk); 441 if (likely(clk->parent)) { 442 omap1_clk_disable(clk->parent); 443 if (clk->flags & CLOCK_NO_IDLE_PARENT) 444 omap1_clk_allow_idle(clk->parent); 445 } 446 } 447 } 448 449 static int omap1_clk_enable_generic(struct clk *clk) 450 { 451 __u16 regval16; 452 __u32 regval32; 453 454 if (unlikely(clk->enable_reg == NULL)) { 455 printk(KERN_ERR "clock.c: Enable for %s without enable code\n", 456 clk->name); 457 return -EINVAL; 458 } 459 460 if (clk->flags & ENABLE_REG_32BIT) { 461 regval32 = __raw_readl(clk->enable_reg); 462 regval32 |= (1 << clk->enable_bit); 463 __raw_writel(regval32, clk->enable_reg); 464 } else { 465 regval16 = __raw_readw(clk->enable_reg); 466 regval16 |= (1 << clk->enable_bit); 467 __raw_writew(regval16, clk->enable_reg); 468 } 469 470 return 0; 471 } 472 473 static void omap1_clk_disable_generic(struct clk *clk) 474 { 475 __u16 regval16; 476 __u32 regval32; 477 478 if (clk->enable_reg == NULL) 479 return; 480 481 if (clk->flags & ENABLE_REG_32BIT) { 482 regval32 = __raw_readl(clk->enable_reg); 483 regval32 &= ~(1 << clk->enable_bit); 484 __raw_writel(regval32, clk->enable_reg); 485 } else { 486 regval16 = __raw_readw(clk->enable_reg); 487 regval16 &= ~(1 << clk->enable_bit); 488 __raw_writew(regval16, clk->enable_reg); 489 } 490 } 491 492 const struct clkops clkops_generic = { 493 .enable = omap1_clk_enable_generic, 494 .disable = omap1_clk_disable_generic, 495 }; 496 497 static int omap1_clk_enable_dsp_domain(struct clk *clk) 498 { 499 int retval; 500 501 retval = omap1_clk_enable(api_ck_p); 502 if (!retval) { 503 retval = omap1_clk_enable_generic(clk); 504 omap1_clk_disable(api_ck_p); 505 } 506 507 return retval; 508 } 509 510 static void omap1_clk_disable_dsp_domain(struct clk *clk) 511 { 512 if (omap1_clk_enable(api_ck_p) == 0) { 513 omap1_clk_disable_generic(clk); 514 omap1_clk_disable(api_ck_p); 515 } 516 } 517 518 const struct clkops clkops_dspck = { 519 .enable = omap1_clk_enable_dsp_domain, 520 .disable = omap1_clk_disable_dsp_domain, 521 }; 522 523 /* XXX SYSC register handling does not belong in the clock framework */ 524 static int omap1_clk_enable_uart_functional_16xx(struct clk *clk) 525 { 526 int ret; 527 struct uart_clk *uclk; 528 529 ret = omap1_clk_enable_generic(clk); 530 if (ret == 0) { 531 /* Set smart idle acknowledgement mode */ 532 uclk = (struct uart_clk *)clk; 533 omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8, 534 uclk->sysc_addr); 535 } 536 537 return ret; 538 } 539 540 /* XXX SYSC register handling does not belong in the clock framework */ 541 static void omap1_clk_disable_uart_functional_16xx(struct clk *clk) 542 { 543 struct uart_clk *uclk; 544 545 /* Set force idle acknowledgement mode */ 546 uclk = (struct uart_clk *)clk; 547 omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr); 548 549 omap1_clk_disable_generic(clk); 550 } 551 552 /* XXX SYSC register handling does not belong in the clock framework */ 553 const struct clkops clkops_uart_16xx = { 554 .enable = omap1_clk_enable_uart_functional_16xx, 555 .disable = omap1_clk_disable_uart_functional_16xx, 556 }; 557 558 long omap1_clk_round_rate(struct clk *clk, unsigned long rate) 559 { 560 if (clk->round_rate != NULL) 561 return clk->round_rate(clk, rate); 562 563 return clk->rate; 564 } 565 566 int omap1_clk_set_rate(struct clk *clk, unsigned long rate) 567 { 568 int ret = -EINVAL; 569 570 if (clk->set_rate) 571 ret = clk->set_rate(clk, rate); 572 return ret; 573 } 574 575 /* 576 * Omap1 clock reset and init functions 577 */ 578 579 #ifdef CONFIG_OMAP_RESET_CLOCKS 580 581 void omap1_clk_disable_unused(struct clk *clk) 582 { 583 __u32 regval32; 584 585 /* Clocks in the DSP domain need api_ck. Just assume bootloader 586 * has not enabled any DSP clocks */ 587 if (clk->enable_reg == DSP_IDLECT2) { 588 pr_info("Skipping reset check for DSP domain clock \"%s\"\n", 589 clk->name); 590 return; 591 } 592 593 /* Is the clock already disabled? */ 594 if (clk->flags & ENABLE_REG_32BIT) 595 regval32 = __raw_readl(clk->enable_reg); 596 else 597 regval32 = __raw_readw(clk->enable_reg); 598 599 if ((regval32 & (1 << clk->enable_bit)) == 0) 600 return; 601 602 printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name); 603 clk->ops->disable(clk); 604 printk(" done\n"); 605 } 606 607 #endif 608 609 610 int clk_enable(struct clk *clk) 611 { 612 unsigned long flags; 613 int ret; 614 615 if (IS_ERR_OR_NULL(clk)) 616 return -EINVAL; 617 618 spin_lock_irqsave(&clockfw_lock, flags); 619 ret = omap1_clk_enable(clk); 620 spin_unlock_irqrestore(&clockfw_lock, flags); 621 622 return ret; 623 } 624 EXPORT_SYMBOL(clk_enable); 625 626 void clk_disable(struct clk *clk) 627 { 628 unsigned long flags; 629 630 if (IS_ERR_OR_NULL(clk)) 631 return; 632 633 spin_lock_irqsave(&clockfw_lock, flags); 634 if (clk->usecount == 0) { 635 pr_err("Trying disable clock %s with 0 usecount\n", 636 clk->name); 637 WARN_ON(1); 638 goto out; 639 } 640 641 omap1_clk_disable(clk); 642 643 out: 644 spin_unlock_irqrestore(&clockfw_lock, flags); 645 } 646 EXPORT_SYMBOL(clk_disable); 647 648 unsigned long clk_get_rate(struct clk *clk) 649 { 650 unsigned long flags; 651 unsigned long ret; 652 653 if (IS_ERR_OR_NULL(clk)) 654 return 0; 655 656 spin_lock_irqsave(&clockfw_lock, flags); 657 ret = clk->rate; 658 spin_unlock_irqrestore(&clockfw_lock, flags); 659 660 return ret; 661 } 662 EXPORT_SYMBOL(clk_get_rate); 663 664 /* 665 * Optional clock functions defined in include/linux/clk.h 666 */ 667 668 long clk_round_rate(struct clk *clk, unsigned long rate) 669 { 670 unsigned long flags; 671 long ret; 672 673 if (IS_ERR_OR_NULL(clk)) 674 return 0; 675 676 spin_lock_irqsave(&clockfw_lock, flags); 677 ret = omap1_clk_round_rate(clk, rate); 678 spin_unlock_irqrestore(&clockfw_lock, flags); 679 680 return ret; 681 } 682 EXPORT_SYMBOL(clk_round_rate); 683 684 int clk_set_rate(struct clk *clk, unsigned long rate) 685 { 686 unsigned long flags; 687 int ret = -EINVAL; 688 689 if (IS_ERR_OR_NULL(clk)) 690 return ret; 691 692 spin_lock_irqsave(&clockfw_lock, flags); 693 ret = omap1_clk_set_rate(clk, rate); 694 if (ret == 0) 695 propagate_rate(clk); 696 spin_unlock_irqrestore(&clockfw_lock, flags); 697 698 return ret; 699 } 700 EXPORT_SYMBOL(clk_set_rate); 701 702 int clk_set_parent(struct clk *clk, struct clk *parent) 703 { 704 WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n"); 705 706 return -EINVAL; 707 } 708 EXPORT_SYMBOL(clk_set_parent); 709 710 struct clk *clk_get_parent(struct clk *clk) 711 { 712 return clk->parent; 713 } 714 EXPORT_SYMBOL(clk_get_parent); 715 716 /* 717 * OMAP specific clock functions shared between omap1 and omap2 718 */ 719 720 /* Used for clocks that always have same value as the parent clock */ 721 unsigned long followparent_recalc(struct clk *clk) 722 { 723 return clk->parent->rate; 724 } 725 726 /* 727 * Used for clocks that have the same value as the parent clock, 728 * divided by some factor 729 */ 730 unsigned long omap_fixed_divisor_recalc(struct clk *clk) 731 { 732 WARN_ON(!clk->fixed_div); 733 734 return clk->parent->rate / clk->fixed_div; 735 } 736 737 /* Propagate rate to children */ 738 void propagate_rate(struct clk *tclk) 739 { 740 struct clk *clkp; 741 742 list_for_each_entry(clkp, &tclk->children, sibling) { 743 if (clkp->recalc) 744 clkp->rate = clkp->recalc(clkp); 745 propagate_rate(clkp); 746 } 747 } 748 749 static LIST_HEAD(root_clks); 750 751 /** 752 * clk_preinit - initialize any fields in the struct clk before clk init 753 * @clk: struct clk * to initialize 754 * 755 * Initialize any struct clk fields needed before normal clk initialization 756 * can run. No return value. 757 */ 758 void clk_preinit(struct clk *clk) 759 { 760 INIT_LIST_HEAD(&clk->children); 761 } 762 763 int clk_register(struct clk *clk) 764 { 765 if (IS_ERR_OR_NULL(clk)) 766 return -EINVAL; 767 768 /* 769 * trap out already registered clocks 770 */ 771 if (clk->node.next || clk->node.prev) 772 return 0; 773 774 mutex_lock(&clocks_mutex); 775 if (clk->parent) 776 list_add(&clk->sibling, &clk->parent->children); 777 else 778 list_add(&clk->sibling, &root_clks); 779 780 list_add(&clk->node, &clocks); 781 if (clk->init) 782 clk->init(clk); 783 mutex_unlock(&clocks_mutex); 784 785 return 0; 786 } 787 EXPORT_SYMBOL(clk_register); 788 789 void clk_unregister(struct clk *clk) 790 { 791 if (IS_ERR_OR_NULL(clk)) 792 return; 793 794 mutex_lock(&clocks_mutex); 795 list_del(&clk->sibling); 796 list_del(&clk->node); 797 mutex_unlock(&clocks_mutex); 798 } 799 EXPORT_SYMBOL(clk_unregister); 800 801 /* 802 * Low level helpers 803 */ 804 static int clkll_enable_null(struct clk *clk) 805 { 806 return 0; 807 } 808 809 static void clkll_disable_null(struct clk *clk) 810 { 811 } 812 813 const struct clkops clkops_null = { 814 .enable = clkll_enable_null, 815 .disable = clkll_disable_null, 816 }; 817 818 /* 819 * Dummy clock 820 * 821 * Used for clock aliases that are needed on some OMAPs, but not others 822 */ 823 struct clk dummy_ck = { 824 .name = "dummy", 825 .ops = &clkops_null, 826 }; 827 828 /* 829 * 830 */ 831 832 #ifdef CONFIG_OMAP_RESET_CLOCKS 833 /* 834 * Disable any unused clocks left on by the bootloader 835 */ 836 static int __init clk_disable_unused(void) 837 { 838 struct clk *ck; 839 unsigned long flags; 840 841 pr_info("clock: disabling unused clocks to save power\n"); 842 843 spin_lock_irqsave(&clockfw_lock, flags); 844 list_for_each_entry(ck, &clocks, node) { 845 if (ck->ops == &clkops_null) 846 continue; 847 848 if (ck->usecount > 0 || !ck->enable_reg) 849 continue; 850 851 omap1_clk_disable_unused(ck); 852 } 853 spin_unlock_irqrestore(&clockfw_lock, flags); 854 855 return 0; 856 } 857 late_initcall(clk_disable_unused); 858 #endif 859 860 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) 861 /* 862 * debugfs support to trace clock tree hierarchy and attributes 863 */ 864 865 #include <linux/debugfs.h> 866 #include <linux/seq_file.h> 867 868 static struct dentry *clk_debugfs_root; 869 870 static int debug_clock_show(struct seq_file *s, void *unused) 871 { 872 struct clk *c; 873 struct clk *pa; 874 875 mutex_lock(&clocks_mutex); 876 seq_printf(s, "%-30s %-30s %-10s %s\n", 877 "clock-name", "parent-name", "rate", "use-count"); 878 879 list_for_each_entry(c, &clocks, node) { 880 pa = c->parent; 881 seq_printf(s, "%-30s %-30s %-10lu %d\n", 882 c->name, pa ? pa->name : "none", c->rate, 883 c->usecount); 884 } 885 mutex_unlock(&clocks_mutex); 886 887 return 0; 888 } 889 890 DEFINE_SHOW_ATTRIBUTE(debug_clock); 891 892 static void clk_debugfs_register_one(struct clk *c) 893 { 894 struct dentry *d; 895 struct clk *pa = c->parent; 896 897 d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root); 898 c->dent = d; 899 900 debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); 901 debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); 902 debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); 903 } 904 905 static void clk_debugfs_register(struct clk *c) 906 { 907 struct clk *pa = c->parent; 908 909 if (pa && !pa->dent) 910 clk_debugfs_register(pa); 911 912 if (!c->dent) 913 clk_debugfs_register_one(c); 914 } 915 916 static int __init clk_debugfs_init(void) 917 { 918 struct clk *c; 919 struct dentry *d; 920 921 d = debugfs_create_dir("clock", NULL); 922 clk_debugfs_root = d; 923 924 list_for_each_entry(c, &clocks, node) 925 clk_debugfs_register(c); 926 927 debugfs_create_file("summary", S_IRUGO, d, NULL, &debug_clock_fops); 928 929 return 0; 930 } 931 late_initcall(clk_debugfs_init); 932 933 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */ 934