// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2
#define QCOM_RPM_XO_MODE_ON		0x2

static const struct clk_parent_data gcc_pxo[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
};

static const struct clk_parent_data gcc_cxo[] = {
	{ .fw_name = "cxo", .name = "cxo_board" },
};

#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
	static struct clk_rpm _platform##_##_active; \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = (r_id), \
		.peer = &_platform##_##_active, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}; \
	static struct clk_rpm _platform##_##_active = { \
		.rpm_clk_id = (r_id), \
		.peer = &_platform##_##_name, \
		.active_only = true, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_active, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

#define DEFINE_CLK_RPM_XO_BUFFER(_platform, _name, _active, offset) \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
		.xo_offset = (offset), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_xo_ops, \
			.name = #_name, \
			.parent_data = gcc_cxo, \
			.num_parents = ARRAY_SIZE(gcc_cxo), \
		}, \
	}

#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
	static struct clk_rpm _platform##_##_name = { \
		.rpm_clk_id = (r_id), \
		.rate = (r), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_fixed_ops, \
			.name = #_name, \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;

struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
	u32 xo_buffer_value;
	struct mutex xo_lock;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);

static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	/*
	 * The vendor tree simply reads the status for this
	 * RPM clock.
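	 *
	 * Here we write INT_MAX to both the active and sleep sets for the
	 * clock, keeping a maximal vote in place until clk_rpm_prepare()
	 * and clk_rpm_set_rate() send real requests; PLL4 and the CXO
	 * buffers are skipped below and left untouched.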
	 */
	if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
	    r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
		return 0;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}

static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}

static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}

static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 1;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = true;

	return ret;
}

static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 0;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = false;
}

static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}

static const struct clk_ops clk_rpm_xo_ops = {
	.prepare = clk_rpm_xo_prepare,
	.unprepare = clk_rpm_xo_unprepare,
};

static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare = clk_rpm_fixed_prepare,
	.unprepare = clk_rpm_fixed_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

/* MSM8660/APQ8060 */
DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);

static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
	[RPM_SFPB_CLK] = &msm8660_sfpb_clk,
	[RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
	[RPM_CFPB_CLK] = &msm8660_cfpb_clk,
	[RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
	[RPM_SMI_CLK] = &msm8660_smi_clk,
	[RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
	[RPM_EBI1_CLK] = &msm8660_ebi1_clk,
	[RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
	[RPM_PLL4_CLK] = &msm8660_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};

/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d0_clk, xo_d0_a_clk, 0);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d1_clk, xo_d1_a_clk, 8);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a0_clk, xo_a0_a_clk, 16);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a1_clk, xo_a1_a_clk, 24);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a2_clk, xo_a2_a_clk, 28);

static struct clk_rpm *apq8064_clks[] = {
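	/* Indices are the RPM_* IDs from <dt-bindings/clock/qcom,rpmcc.h> */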
	[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
	[RPM_CFPB_CLK] = &apq8064_cfpb_clk,
	[RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
	[RPM_EBI1_CLK] = &apq8064_ebi1_clk,
	[RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
	[RPM_SFPB_CLK] = &apq8064_sfpb_clk,
	[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
	[RPM_QDSS_CLK] = &apq8064_qdss_clk,
	[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
	[RPM_XO_D0] = &apq8064_xo_d0_clk,
	[RPM_XO_D1] = &apq8064_xo_d1_clk,
	[RPM_XO_A0] = &apq8064_xo_a0_clk,
	[RPM_XO_A1] = &apq8064_xo_a1_clk,
	[RPM_XO_A2] = &apq8064_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

/* ipq806x */
DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK);

static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk,
	[RPM_CFPB_CLK] = &ipq806x_cfpb_clk,
	[RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk,
	[RPM_EBI1_CLK] = &ipq806x_ebi1_clk,
	[RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk,
	[RPM_SFPB_CLK] = &ipq806x_sfpb_clk,
	[RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);

static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}

static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
				     rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static int rpm_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
	.remove = rpm_clk_remove,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");