// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 MaxLinear, Inc.
 * Copyright (C) 2020 Intel Corporation.
 * Zhu Yixin <yzhu@maxlinear.com>
 * Rahul Tanwar <rtanwar@maxlinear.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	u32 val;

	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);

	return hw;
}
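
/*
 * Illustrative sketch, not part of the driver: a CLK_TYPE_MUX branch
 * descriptor as lgm_clk_register_mux() above consumes it. All values and
 * names here are hypothetical; real branch tables come from the SoC
 * clock driver. Note that when MUX_CLK_SW is set in .mux_flags, the mux
 * ops cache the selected index in mux->reg instead of touching the
 * hardware register.
 *
 *	{
 *		.id = EXAMPLE_MUX_ID,		(hypothetical index)
 *		.type = CLK_TYPE_MUX,
 *		.name = "example_mux",
 *		.parent_data = example_parents,	(hypothetical array)
 *		.num_parents = ARRAY_SIZE(example_parents),
 *		.mux_off = 0x100,		(register offset)
 *		.mux_shift = 0,			(bit position)
 *		.mux_width = 2,			(field width in bits)
 *	},
 */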

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned int val;

	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);

	return 0;
}

static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);

	if (div->flags != DIV_CLK_NO_MASK)
		lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
				div->width_gate, enable);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate = shift_gate;
	div->width_gate = width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name, list->flags,
					  list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);

	return hw;
}
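
/*
 * Gate HW interface: each gate instance spans three registers. Relative
 * to the gate offset, GATE_HW_REG_STAT (+0x0) reads back the current
 * state, while enable and disable are both performed by writing a 1 to
 * the clock's bit in GATE_HW_REG_EN (+0x4) or GATE_HW_REG_DIS (+0x8)
 * respectively, rather than by toggling a single enable bit in place.
 */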

static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg;

	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg;

	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;

	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT)
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);

	return hw;
}
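
/**
 * lgm_clk_register_branches() - register a table of CGU branch clocks
 * @ctx: CGU clock provider context (device, iomem base, clk_data)
 * @list: array of branch descriptors (fixed, mux, divider, fixed-factor
 *	  and gate clocks)
 * @nr_clk: number of entries in @list
 *
 * Each registered clk_hw is stored in ctx->clk_data.hws[] at the
 * descriptor's id. Returns 0 on success, -EINVAL on an unknown branch
 * type and -EIO when a registration fails.
 */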
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			if (list->gate_flags & GATE_CLK_HW) {
				hw = lgm_clk_register_gate(ctx, list);
			} else {
				/*
				 * GATE_CLKs can be controlled either from
				 * the CGU clk driver, i.e. this driver, or
				 * directly from the power management
				 * driver/daemon, depending on the power
				 * policy/profile requirements of the end
				 * product. To override control of gate
				 * clks from this driver, provide NULL for
				 * this index of the gate clk provider.
				 */
				hw = NULL;
			}
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	u64 prate;

	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
}

static int lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}
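
/*
 * Worked example for lgm_clk_get_ddiv_val() above: div = 12 stops the
 * loop at idx = 2 (12 % 2 == 0 and 12 / 2 = 6 <= MAX_DDIV_REG), giving
 * ddiv1 = 6 and ddiv2 = 2. A request like div = 63 has no factorization
 * with both factors <= MAX_DDIV_REG (63 = 7 * 9), so the helper returns
 * -EINVAL; that is why lgm_clk_ddiv_round_rate() below retries with
 * div + 1 before giving up.
 */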
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	/* if the predivide bit is enabled, modify div by a factor of 2.5 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (div <= 0)
		return -EINVAL;

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
		return -EINVAL;

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);

	return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	u64 rate64;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if the predivide bit is enabled, modify div by a factor of 2.5 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}

	if (div <= 0)
		return *prate;

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
			return -EINVAL;

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if the predivide bit is enabled, modify the rounded rate by 2.5 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable = lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct clk_init_data init = {};
		struct lgm_clk_ddiv *ddiv;

		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
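
/*
 * Usage sketch (illustrative only, not part of this file): a SoC clock
 * driver is expected to set up a struct lgm_clk_provider and hand its
 * clock tables to the two registration helpers above. The table and
 * function names below are hypothetical.
 *
 *	static int lgm_cgu_probe(struct platform_device *pdev)
 *	{
 *		struct lgm_clk_provider *ctx;
 *		int ret;
 *
 *		// allocate and initialize ctx, then:
 *		ctx->dev = &pdev->dev;
 *		ctx->membase = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(ctx->membase))
 *			return PTR_ERR(ctx->membase);
 *
 *		ret = lgm_clk_register_branches(ctx, lgm_branch_clks,
 *						ARRAY_SIZE(lgm_branch_clks));
 *		if (ret)
 *			return ret;
 *
 *		return lgm_clk_register_ddiv(ctx, lgm_ddiv_clks,
 *					     ARRAY_SIZE(lgm_ddiv_clks));
 *	}
 */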