// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/gcd.h>
#include <linux/spinlock.h>

#include "clk-cv18xx-ip.h"

/* GATE */
static inline struct cv1800_clk_gate *hw_to_cv1800_clk_gate(struct clk_hw *hw)
{
	struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);

	return container_of(common, struct cv1800_clk_gate, common);
}

static int gate_enable(struct clk_hw *hw)
{
	struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);

	return cv1800_clk_setbit(&gate->common, &gate->gate);
}

static void gate_disable(struct clk_hw *hw)
{
	struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);

	cv1800_clk_clearbit(&gate->common, &gate->gate);
}

static int gate_is_enabled(struct clk_hw *hw)
{
	struct cv1800_clk_gate *gate = hw_to_cv1800_clk_gate(hw);

	return cv1800_clk_checkbit(&gate->common, &gate->gate);
}

static unsigned long gate_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	return parent_rate;
}

static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long *parent_rate)
{
	return *parent_rate;
}

static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	return 0;
}

const struct clk_ops cv1800_clk_gate_ops = {
	.disable = gate_disable,
	.enable = gate_enable,
	.is_enabled = gate_is_enabled,

	.recalc_rate = gate_recalc_rate,
	.round_rate = gate_round_rate,
	.set_rate = gate_set_rate,
};

/* DIV */
#define _DIV_EN_CLK_DIV_FACTOR_FIELD		BIT(3)

#define DIV_GET_EN_CLK_DIV_FACTOR(_reg) \
	FIELD_GET(_DIV_EN_CLK_DIV_FACTOR_FIELD, _reg)

#define DIV_SET_EN_DIV_FACTOR(_reg) \
	_CV1800_SET_FIELD(_reg, 1, _DIV_EN_CLK_DIV_FACTOR_FIELD)

static inline struct cv1800_clk_div *hw_to_cv1800_clk_div(struct clk_hw *hw)
{
	struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);

	return container_of(common, struct cv1800_clk_div, common);
}

static int div_enable(struct clk_hw *hw)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);

	return cv1800_clk_setbit(&div->common, &div->gate);
}

static void div_disable(struct clk_hw *hw)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);

	cv1800_clk_clearbit(&div->common, &div->gate);
}

static int div_is_enabled(struct clk_hw *hw)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);

	return cv1800_clk_checkbit(&div->common, &div->gate);
}

static int div_helper_set_rate(struct cv1800_clk_common *common,
			       struct cv1800_clk_regfield *div,
			       unsigned long val)
{
	unsigned long flags;
	u32 reg;

	if (div->width == 0)
		return 0;

	spin_lock_irqsave(common->lock, flags);

	reg = readl(common->base + div->reg);
	reg = cv1800_clk_regfield_set(reg, val, div);
	if (div->initval > 0)
		reg = DIV_SET_EN_DIV_FACTOR(reg);

	writel(reg, common->base + div->reg);

	spin_unlock_irqrestore(common->lock, flags);

	return 0;
}

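/*
 * Divider register fields carry an "initval" describing their reset
 * behaviour:
 *   initval < 0:  no divider, the clock passes through undivided;
 *   initval == 0: the register field is always valid;
 *   initval > 0:  the hardware default divisor, used until the
 *                 EN_CLK_DIV_FACTOR bit (bit 3) is set and the field
 *                 value takes over (div_helper_set_rate() sets that bit
 *                 when it programs the field).
 * A zero-width field with a positive initval acts as a fixed divider.
 */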
static u32 div_helper_get_clockdiv(struct cv1800_clk_common *common,
				   struct cv1800_clk_regfield *div)
{
	u32 clockdiv = 1;
	u32 reg;

	if (!div || div->initval < 0 || (div->width == 0 && div->initval <= 0))
		return 1;

	if (div->width == 0 && div->initval > 0)
		return div->initval;

	reg = readl(common->base + div->reg);

	if (div->initval == 0 || DIV_GET_EN_CLK_DIV_FACTOR(reg))
		clockdiv = cv1800_clk_regfield_get(reg, div);
	else if (div->initval > 0)
		clockdiv = div->initval;

	return clockdiv;
}

static u32 div_helper_round_rate(struct cv1800_clk_regfield *div,
				 struct clk_hw *hw, struct clk_hw *parent,
				 unsigned long rate, unsigned long *prate)
{
	if (div->width == 0) {
		if (div->initval <= 0)
			return DIV_ROUND_UP_ULL(*prate, 1);
		else
			return DIV_ROUND_UP_ULL(*prate, div->initval);
	}

	return divider_round_rate_parent(hw, parent, rate, prate, NULL,
					 div->width, div->flags);
}

static long div_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
			   unsigned long rate, int id, void *data)
{
	struct cv1800_clk_div *div = data;

	return div_helper_round_rate(&div->div, &div->common.hw, parent,
				     rate, parent_rate);
}

static bool div_is_better_rate(struct cv1800_clk_common *common,
			       unsigned long target, unsigned long now,
			       unsigned long best)
{
	if (common->features & CLK_DIVIDER_ROUND_CLOSEST)
		return abs_diff(target, now) < abs_diff(target, best);

	return now <= target && now > best;
}

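/*
 * Shared determine_rate helper for the divider, mux and mmux clocks.
 * When reparenting is not allowed only the current parent is considered;
 * otherwise every parent is rounded through the type-specific @round
 * callback and the candidate closest to (or exactly matching) the
 * requested rate is picked by div_is_better_rate().
 */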
static int mux_helper_determine_rate(struct cv1800_clk_common *common,
				     struct clk_rate_request *req,
				     long (*round)(struct clk_hw *,
						   unsigned long *,
						   unsigned long,
						   int,
						   void *),
				     void *data)
{
	unsigned long best_parent_rate = 0, best_rate = 0;
	struct clk_hw *best_parent, *hw = &common->hw;
	unsigned int i;

	if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
		unsigned long adj_parent_rate;

		best_parent = clk_hw_get_parent(hw);
		best_parent_rate = clk_hw_get_rate(best_parent);
		/* round against a copy of the current parent rate */
		adj_parent_rate = best_parent_rate;

		best_rate = round(best_parent, &adj_parent_rate,
				  req->rate, -1, data);

		goto find;
	}

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		unsigned long tmp_rate, parent_rate;
		struct clk_hw *parent;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);

		tmp_rate = round(parent, &parent_rate, req->rate, i, data);

		if (tmp_rate == req->rate) {
			best_parent = parent;
			best_parent_rate = parent_rate;
			best_rate = tmp_rate;
			goto find;
		}

		if (div_is_better_rate(common, req->rate,
				       tmp_rate, best_rate)) {
			best_parent = parent;
			best_parent_rate = parent_rate;
			best_rate = tmp_rate;
		}
	}

	if (best_rate == 0)
		return -EINVAL;

find:
	req->best_parent_hw = best_parent;
	req->best_parent_rate = best_parent_rate;
	req->rate = best_rate;
	return 0;
}

static int div_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);

	return mux_helper_determine_rate(&div->common, req,
					 div_round_rate, div);
}

static unsigned long div_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
	unsigned long val;

	val = div_helper_get_clockdiv(&div->common, &div->div);
	if (val == 0)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, NULL,
				   div->div.flags, div->div.width);
}

static int div_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);
	unsigned long val;

	val = divider_get_val(rate, parent_rate, NULL,
			      div->div.width, div->div.flags);

	return div_helper_set_rate(&div->common, &div->div, val);
}

const struct clk_ops cv1800_clk_div_ops = {
	.disable = div_disable,
	.enable = div_enable,
	.is_enabled = div_is_enabled,

	.determine_rate = div_determine_rate,
	.recalc_rate = div_recalc_rate,
	.set_rate = div_set_rate,
};

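/*
 * Bypass variants (divider below, mux further down): parent index 0
 * always selects the bypass source controlled by the bypass bit, and the
 * remaining indices are shifted down by one before being handed to the
 * underlying divider or mux.
 */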
static inline struct cv1800_clk_bypass_div *
hw_to_cv1800_clk_bypass_div(struct clk_hw *hw)
{
	struct cv1800_clk_div *div = hw_to_cv1800_clk_div(hw);

	return container_of(div, struct cv1800_clk_bypass_div, div);
}

static long bypass_div_round_rate(struct clk_hw *parent,
				  unsigned long *parent_rate,
				  unsigned long rate, int id, void *data)
{
	struct cv1800_clk_bypass_div *div = data;

	if (id == -1) {
		if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
			return *parent_rate;
		else
			return div_round_rate(parent, parent_rate, rate,
					      -1, &div->div);
	}

	if (id == 0)
		return *parent_rate;

	return div_round_rate(parent, parent_rate, rate, id - 1, &div->div);
}

static int bypass_div_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);

	return mux_helper_determine_rate(&div->div.common, req,
					 bypass_div_round_rate, div);
}

static unsigned long bypass_div_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);

	if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
		return parent_rate;

	return div_recalc_rate(hw, parent_rate);
}

static int bypass_div_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);

	if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
		return 0;

	return div_set_rate(hw, rate, parent_rate);
}

static u8 bypass_div_get_parent(struct clk_hw *hw)
{
	struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);

	if (cv1800_clk_checkbit(&div->div.common, &div->bypass))
		return 0;

	return 1;
}

static int bypass_div_set_parent(struct clk_hw *hw, u8 index)
{
	struct cv1800_clk_bypass_div *div = hw_to_cv1800_clk_bypass_div(hw);

	if (index)
		return cv1800_clk_clearbit(&div->div.common, &div->bypass);

	return cv1800_clk_setbit(&div->div.common, &div->bypass);
}

const struct clk_ops cv1800_clk_bypass_div_ops = {
	.disable = div_disable,
	.enable = div_enable,
	.is_enabled = div_is_enabled,

	.determine_rate = bypass_div_determine_rate,
	.recalc_rate = bypass_div_recalc_rate,
	.set_rate = bypass_div_set_rate,

	.set_parent = bypass_div_set_parent,
	.get_parent = bypass_div_get_parent,
};

/* MUX */
static inline struct cv1800_clk_mux *hw_to_cv1800_clk_mux(struct clk_hw *hw)
{
	struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);

	return container_of(common, struct cv1800_clk_mux, common);
}

static int mux_enable(struct clk_hw *hw)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);

	return cv1800_clk_setbit(&mux->common, &mux->gate);
}

static void mux_disable(struct clk_hw *hw)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);

	cv1800_clk_clearbit(&mux->common, &mux->gate);
}

static int mux_is_enabled(struct clk_hw *hw)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);

	return cv1800_clk_checkbit(&mux->common, &mux->gate);
}

static long mux_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
			   unsigned long rate, int id, void *data)
{
	struct cv1800_clk_mux *mux = data;

	return div_helper_round_rate(&mux->div, &mux->common.hw, parent,
				     rate, parent_rate);
}

static int mux_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);

	return mux_helper_determine_rate(&mux->common, req,
					 mux_round_rate, mux);
}

static unsigned long mux_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
	unsigned long val;

	val = div_helper_get_clockdiv(&mux->common, &mux->div);
	if (val == 0)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, NULL,
				   mux->div.flags, mux->div.width);
}

static int mux_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
	unsigned long val;

	val = divider_get_val(rate, parent_rate, NULL,
			      mux->div.width, mux->div.flags);

	return div_helper_set_rate(&mux->common, &mux->div, val);
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
	u32 reg = readl(mux->common.base + mux->mux.reg);

	return cv1800_clk_regfield_get(reg, &mux->mux);
}

static int _mux_set_parent(struct cv1800_clk_mux *mux, u8 index)
{
	u32 reg;

	reg = readl(mux->common.base + mux->mux.reg);
	reg = cv1800_clk_regfield_set(reg, index, &mux->mux);
	writel(reg, mux->common.base + mux->mux.reg);

	return 0;
}

static int mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);
	unsigned long flags;

	spin_lock_irqsave(mux->common.lock, flags);

	_mux_set_parent(mux, index);

	spin_unlock_irqrestore(mux->common.lock, flags);

	return 0;
}

const struct clk_ops cv1800_clk_mux_ops = {
	.disable = mux_disable,
	.enable = mux_enable,
	.is_enabled = mux_is_enabled,

	.determine_rate = mux_determine_rate,
	.recalc_rate = mux_recalc_rate,
	.set_rate = mux_set_rate,

	.set_parent = mux_set_parent,
	.get_parent = mux_get_parent,
};

static inline struct cv1800_clk_bypass_mux *
hw_to_cv1800_clk_bypass_mux(struct clk_hw *hw)
{
	struct cv1800_clk_mux *mux = hw_to_cv1800_clk_mux(hw);

	return container_of(mux, struct cv1800_clk_bypass_mux, mux);
}

static long bypass_mux_round_rate(struct clk_hw *parent,
				  unsigned long *parent_rate,
				  unsigned long rate, int id, void *data)
{
	struct cv1800_clk_bypass_mux *mux = data;

	if (id == -1) {
		if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
			return *parent_rate;
		else
			return mux_round_rate(parent, parent_rate, rate,
					      -1, &mux->mux);
	}

	if (id == 0)
		return *parent_rate;

	return mux_round_rate(parent, parent_rate, rate, id - 1, &mux->mux);
}

static int bypass_mux_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);

	return mux_helper_determine_rate(&mux->mux.common, req,
					 bypass_mux_round_rate, mux);
}

static unsigned long bypass_mux_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);

	if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
		return parent_rate;

	return mux_recalc_rate(hw, parent_rate);
}

static int bypass_mux_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);

	if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
		return 0;

	return mux_set_rate(hw, rate, parent_rate);
}

static u8 bypass_mux_get_parent(struct clk_hw *hw)
{
	struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);

	if (cv1800_clk_checkbit(&mux->mux.common, &mux->bypass))
		return 0;

	return mux_get_parent(hw) + 1;
}

static int bypass_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct cv1800_clk_bypass_mux *mux = hw_to_cv1800_clk_bypass_mux(hw);

	if (index == 0)
		return cv1800_clk_setbit(&mux->mux.common, &mux->bypass);

	return cv1800_clk_clearbit(&mux->mux.common, &mux->bypass);
}

const struct clk_ops cv1800_clk_bypass_mux_ops = {
	.disable = mux_disable,
	.enable = mux_enable,
	.is_enabled = mux_is_enabled,

	.determine_rate = bypass_mux_determine_rate,
	.recalc_rate = bypass_mux_recalc_rate,
	.set_rate = bypass_mux_set_rate,

	.set_parent = bypass_mux_set_parent,
	.get_parent = bypass_mux_get_parent,
};

/* MMUX */
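/*
 * An mmux has two mux/divider register pairs.  The clk_sel bit picks the
 * active pair (index 0 when the bit is set, index 1 when it is clear),
 * parent2sel[] maps a global parent index to its pair (a negative entry
 * marks a parent reached only through the bypass path), and sel2parent[]
 * translates a pair's selector value back to a global parent index.
 * Parent index 0 is the bypass source.
 */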
static inline struct cv1800_clk_mmux *hw_to_cv1800_clk_mmux(struct clk_hw *hw)
{
	struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);

	return container_of(common, struct cv1800_clk_mmux, common);
}

static u8 mmux_get_parent_id(struct cv1800_clk_mmux *mmux)
{
	struct clk_hw *hw = &mmux->common.hw;
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned int i;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		if (parent == clk_hw_get_parent_by_index(hw, i))
			return i;
	}

	unreachable();
}

static int mmux_enable(struct clk_hw *hw)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);

	return cv1800_clk_setbit(&mmux->common, &mmux->gate);
}

static void mmux_disable(struct clk_hw *hw)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);

	cv1800_clk_clearbit(&mmux->common, &mmux->gate);
}

static int mmux_is_enabled(struct clk_hw *hw)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);

	return cv1800_clk_checkbit(&mmux->common, &mmux->gate);
}

static long mmux_round_rate(struct clk_hw *parent, unsigned long *parent_rate,
			    unsigned long rate, int id, void *data)
{
	struct cv1800_clk_mmux *mmux = data;
	s8 div_id;

	if (id == -1) {
		if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
			return *parent_rate;

		id = mmux_get_parent_id(mmux);
	}

	div_id = mmux->parent2sel[id];

	if (div_id < 0)
		return *parent_rate;

	return div_helper_round_rate(&mmux->div[div_id],
				     &mmux->common.hw, parent,
				     rate, parent_rate);
}

static int mmux_determine_rate(struct clk_hw *hw,
			       struct clk_rate_request *req)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);

	return mux_helper_determine_rate(&mmux->common, req,
					 mmux_round_rate, mmux);
}

static unsigned long mmux_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
	unsigned long val;
	struct cv1800_clk_regfield *div;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
		return parent_rate;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
		div = &mmux->div[0];
	else
		div = &mmux->div[1];

	val = div_helper_get_clockdiv(&mmux->common, div);
	if (val == 0)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, NULL,
				   div->flags, div->width);
}

static int mmux_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
	struct cv1800_clk_regfield *div;
	unsigned long val;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
		return parent_rate;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
		div = &mmux->div[0];
	else
		div = &mmux->div[1];

	val = divider_get_val(rate, parent_rate, NULL,
			      div->width, div->flags);

	return div_helper_set_rate(&mmux->common, div, val);
}

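/*
 * Parent handling: while the bypass bit is set the clock reports parent 0.
 * Selecting parent 0 (or a parent without a divider pair) sets the bypass
 * bit; any other selection clears it, programs clk_sel for the target
 * pair and then writes that pair's mux field.
 */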
static u8 mmux_get_parent(struct clk_hw *hw)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
	struct cv1800_clk_regfield *mux;
	u32 reg;
	s8 clk_sel;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->bypass))
		return 0;

	if (cv1800_clk_checkbit(&mmux->common, &mmux->clk_sel))
		clk_sel = 0;
	else
		clk_sel = 1;
	mux = &mmux->mux[clk_sel];

	reg = readl(mmux->common.base + mux->reg);

	return mmux->sel2parent[clk_sel][cv1800_clk_regfield_get(reg, mux)];
}

static int mmux_set_parent(struct clk_hw *hw, u8 index)
{
	struct cv1800_clk_mmux *mmux = hw_to_cv1800_clk_mmux(hw);
	struct cv1800_clk_regfield *mux;
	unsigned long flags;
	u32 reg;
	s8 clk_sel = mmux->parent2sel[index];

	if (index == 0 || clk_sel == -1) {
		cv1800_clk_setbit(&mmux->common, &mmux->bypass);
		goto release;
	}

	cv1800_clk_clearbit(&mmux->common, &mmux->bypass);

	if (clk_sel)
		cv1800_clk_clearbit(&mmux->common, &mmux->clk_sel);
	else
		cv1800_clk_setbit(&mmux->common, &mmux->clk_sel);

	spin_lock_irqsave(mmux->common.lock, flags);

	mux = &mmux->mux[clk_sel];
	reg = readl(mmux->common.base + mux->reg);
	reg = cv1800_clk_regfield_set(reg, index, mux);

	writel(reg, mmux->common.base + mux->reg);

	spin_unlock_irqrestore(mmux->common.lock, flags);

release:
	return 0;
}

const struct clk_ops cv1800_clk_mmux_ops = {
	.disable = mmux_disable,
	.enable = mmux_enable,
	.is_enabled = mmux_is_enabled,

	.determine_rate = mmux_determine_rate,
	.recalc_rate = mmux_recalc_rate,
	.set_rate = mmux_set_rate,

	.set_parent = mmux_set_parent,
	.get_parent = mmux_get_parent,
};

/* AUDIO CLK */
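/*
 * The audio clock is a fractional divider:
 *
 *   rate = parent_rate * N / (2 * M)
 *
 * aclk_determine_mn() reduces parent_rate / 2 and the requested rate by
 * their gcd, e.g. parent_rate = 24 MHz and rate = 8 MHz give M = 3 and
 * N = 2.  After M and N are programmed, div_en enables the divider and
 * div_up is set, which appears intended to latch the new values.  Rate
 * requests are pinned to the preconfigured target_rate by
 * aclk_determine_rate().
 */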
static inline struct cv1800_clk_audio *
hw_to_cv1800_clk_audio(struct clk_hw *hw)
{
	struct cv1800_clk_common *common = hw_to_cv1800_clk_common(hw);

	return container_of(common, struct cv1800_clk_audio, common);
}

static int aclk_enable(struct clk_hw *hw)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);

	cv1800_clk_setbit(&aclk->common, &aclk->src_en);
	return cv1800_clk_setbit(&aclk->common, &aclk->output_en);
}

static void aclk_disable(struct clk_hw *hw)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);

	cv1800_clk_clearbit(&aclk->common, &aclk->output_en);
	cv1800_clk_clearbit(&aclk->common, &aclk->src_en);
}

static int aclk_is_enabled(struct clk_hw *hw)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);

	return cv1800_clk_checkbit(&aclk->common, &aclk->output_en);
}

static int aclk_determine_rate(struct clk_hw *hw,
			       struct clk_rate_request *req)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);

	req->rate = aclk->target_rate;

	return 0;
}

static unsigned long aclk_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
	u64 rate = parent_rate;
	u64 factor = 2;
	u32 regval;

	if (!cv1800_clk_checkbit(&aclk->common, &aclk->div_en))
		return 0;

	regval = readl(aclk->common.base + aclk->m.reg);
	factor *= cv1800_clk_regfield_get(regval, &aclk->m);

	regval = readl(aclk->common.base + aclk->n.reg);
	rate *= cv1800_clk_regfield_get(regval, &aclk->n);

	return DIV64_U64_ROUND_UP(rate, factor);
}

static void aclk_determine_mn(unsigned long parent_rate, unsigned long rate,
			      u32 *m, u32 *n)
{
	u32 tm = parent_rate / 2;
	u32 tn = rate;
	u32 tcommon = gcd(tm, tn);
	*m = tm / tcommon;
	*n = tn / tcommon;
}

static int aclk_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct cv1800_clk_audio *aclk = hw_to_cv1800_clk_audio(hw);
	unsigned long flags;
	u32 m, n;

	aclk_determine_mn(parent_rate, rate, &m, &n);

	spin_lock_irqsave(aclk->common.lock, flags);

	writel(m, aclk->common.base + aclk->m.reg);
	writel(n, aclk->common.base + aclk->n.reg);

	cv1800_clk_setbit(&aclk->common, &aclk->div_en);
	cv1800_clk_setbit(&aclk->common, &aclk->div_up);

	spin_unlock_irqrestore(aclk->common.lock, flags);

	return 0;
}

const struct clk_ops cv1800_clk_audio_ops = {
	.disable = aclk_disable,
	.enable = aclk_enable,
	.is_enabled = aclk_is_enabled,

	.determine_rate = aclk_determine_rate,
	.recalc_rate = aclk_recalc_rate,
	.set_rate = aclk_set_rate,
};