/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2

#define DEFINE_CLK_RPM(_platform, _name, _active, r_id)			\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_active,				\
		.rate = INT_MAX,					\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_ops,				\
			.name = #_name,					\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_name,				\
		.active_only = true,					\
		.rate = INT_MAX,					\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_ops,				\
			.name = #_active,				\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r)	\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.active_only = true,					\
		.peer = &_platform##_##_active,				\
		.rate = (r),						\
		.branch = true,						\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_name,					\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_name,				\
		.rate = (r),						\
		.branch = true,						\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_active,				\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r)	\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_active,				\
		.rate = (r),						\
		.branch = true,						\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_name,					\
			.parent_names = (const char *[]){ "cxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.active_only = true,					\
		.peer = &_platform##_##_name,				\
		.rate = (r),						\
		.branch = true,						\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_active,				\
			.parent_names = (const char *[]){ "cxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
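/*
 * An RPM resource is exposed as a pair of clock handles: a normal one and
 * an "active-only" peer (the *_a_clk variants defined above). The normal
 * handle votes the same rate for the RPM's active and sleep sets, while the
 * active-only handle votes zero for the sleep set, so its request is dropped
 * whenever the application processor sleeps. Branch clocks are plain on/off
 * switches registered with a fixed rate; their votes are clamped to 0 or 1.
 */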
struct clk_rpm {
	const int rpm_clk_id;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);

static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}
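/*
 * Enabling a clock sends the aggregated vote of this handle and its peer to
 * the RPM: each of the active and sleep sets gets the higher of the two
 * requested rates, reduced to 0/1 for branch clocks.
 */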
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}
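/*
 * Branch clocks share the prepare/unprepare path but have no .set_rate op;
 * they are registered at a fixed rate and only ever toggle their vote.
 */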
static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_branch_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
	[RPM_CFPB_CLK] = &apq8064_cfpb_clk,
	[RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
	[RPM_EBI1_CLK] = &apq8064_ebi1_clk,
	[RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
	[RPM_SFPB_CLK] = &apq8064_sfpb_clk,
	[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
	[RPM_QDSS_CLK] = &apq8064_qdss_clk,
	[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
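/*
 * of_clk provider callback: map the cell value from a consumer's "clocks"
 * phandle to the corresponding clk_hw, or -ENOENT for holes in the table.
 */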
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}

static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
				     rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static int rpm_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
	.remove = rpm_clk_remove,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");