// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018-2021 NXP
 * Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/bsearch.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/firmware/imx/svc/rm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <xen/xen.h>

#include "clk-scu.h"

#define IMX_SIP_CPUFREQ		0xC2000001
#define IMX_SIP_SET_CPUFREQ	0x00

static struct imx_sc_ipc *ccm_ipc_handle;
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
static const struct imx_clk_scu_rsrc_table *rsrc_table;

struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save & restore */
	struct clk_hw *parent;
	u8 parent_index;
	bool is_enabled;
	u32 rate;
};

/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)

/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol of clock rate set
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol of clock rate get
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol of clock get parent
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};
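
/*
 * Example (illustrative sketch, not a function in this file): the
 * request/response pairs above share a union because the SCU transport
 * reuses the same message buffer for the reply. A caller fills @req,
 * issues the RPC, then reads @resp back out of the same message:
 *
 *	msg.data.req.resource = cpu_to_le16(rsrc_id);
 *	msg.data.req.clk = clk_type;
 *	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
 *	if (!ret)
 *		parent = msg.data.resp.parent;
 */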

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @req: set parent request protocol
 *
 * This structure describes the SCU protocol of clock set parent
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether to gate off the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol of clock gate
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);

static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}

static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p)
{
	return *(u32 *)rsrc - *(u32 *)rsrc_p;
}

static bool imx_scu_clk_is_valid(u32 rsrc_id)
{
	void *p;

	if (!rsrc_table)
		return true;

	p = bsearch(&rsrc_id, rsrc_table->rsrc, rsrc_table->num,
		    sizeof(rsrc_table->rsrc[0]), imx_scu_clk_search_cmp);

	return p != NULL;
}

int imx_clk_scu_init(struct device_node *np,
		     const struct imx_clk_scu_rsrc_table *data)
{
	u32 clk_cells = 0;
	int ret, i;

	ret = imx_scu_get_handle(&ccm_ipc_handle);
	if (ret)
		return ret;

	of_property_read_u32(np, "#clock-cells", &clk_cells);

	if (clk_cells == 2) {
		for (i = 0; i < IMX_SC_R_LAST; i++)
			INIT_LIST_HEAD(&imx_scu_clks[i]);

		/* pd_np will be used to attach power domains later */
		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
		if (!pd_np)
			return -EINVAL;

		rsrc_table = data;
	}

	return platform_driver_register(&imx_clk_scu_driver);
}
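
/*
 * Example: a SoC clock driver would typically hand imx_clk_scu_init() a
 * table of the clock resources valid on that part. Because
 * imx_scu_clk_is_valid() uses bsearch(), the IDs must be listed in
 * ascending numeric order. Hypothetical sketch (table name and entries
 * are illustrative only):
 *
 *	static const u32 imx8_clk_scu_rsrc[] = {
 *		IMX_SC_R_UART_0,
 *		IMX_SC_R_UART_1,
 *	};
 *
 *	static const struct imx_clk_scu_rsrc_table imx8_clk_scu_rsrc_table = {
 *		.rsrc = imx8_clk_scu_rsrc,
 *		.num = ARRAY_SIZE(imx8_clk_scu_rsrc),
 *	};
 *
 *	ret = imx_clk_scu_init(ccm_np, &imx8_clk_scu_rsrc_table);
 */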

/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock rate %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return le32_to_cpu(msg.data.resp.rate);
}

/*
 * clk_scu_determine_rate - Returns the closest rate for a SCU clock
 * @hw: clock to round rate for
 * @req: clock rate request
 *
 * Returns 0 on success, a negative error on failure
 */
static int clk_scu_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/*
	 * Assume we can support any requested rate and leave the
	 * remaining work to the SCU firmware.
	 */
	return 0;
}

static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct arm_smccc_res res;
	unsigned long cluster_id;

	if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53)
		cluster_id = 0;
	else if (clk->rsrc_id == IMX_SC_R_A72)
		cluster_id = 1;
	else
		return -EINVAL;

	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
		      cluster_id, rate, 0, 0, 0, 0, &res);

	return 0;
}
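
/*
 * Note: clk_scu_determine_rate() above accepts any requested rate, so
 * the firmware, not the kernel, does the final rounding. A consumer
 * should therefore read the rate back after setting it; the
 * CLK_GET_RATE_NOCACHE flag set in __imx_clk_scu() makes the read-back
 * query the SCU instead of a cached value. Illustrative consumer-side
 * sketch (the clock handle is hypothetical):
 *
 *	clk_set_rate(uart_clk, 80000000);
 *	actual = clk_get_rate(uart_clk);
 */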

/*
 * clk_scu_set_rate - Set rate for a SCU clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for SCU clocks
 *
 * Sets a clock frequency for a SCU clock. Returns the SCU
 * protocol status.
 */
static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_req_set_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
	hdr->size = 3;

	msg.rate = cpu_to_le32(rate);
	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	clk->parent_index = msg.data.resp.parent;

	return msg.data.resp.parent;
}

static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to set clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return ret;
	}

	clk->parent_index = index;

	return 0;
}

static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

/*
 * clk_scu_prepare - Enable a SCU clock
 * @hw: clock to enable
 *
 * Enable the clock at the DSC slice level
 */
static int clk_scu_prepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);

	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				  clk->clk_type, true, false);
}

/*
 * clk_scu_unprepare - Disable a SCU clock
 * @hw: clock to disable
 *
 * Disable the clock at the DSC slice level
 */
static void clk_scu_unprepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	int ret;

	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				 clk->clk_type, false, false);
	if (ret)
		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
			ret);
}
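
/*
 * Note: gating is exposed through .prepare/.unprepare rather than
 * .enable/.disable because imx_scu_call_rpc() may sleep waiting for the
 * firmware reply, while .enable/.disable callbacks run in atomic
 * context. Consumers use the usual sleepable helpers (illustrative
 * sketch):
 *
 *	ret = clk_prepare_enable(clk);	(RPC issued from .prepare)
 *	...
 *	clk_disable_unprepare(clk);	(RPC issued from .unprepare)
 */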

static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_pi_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_set_rate,
};

struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
			     const char * const *parents, int num_parents,
			     u32 rsrc_id, u8 clk_type)
{
	struct clk_init_data init;
	struct clk_scu *clk;
	struct clk_hw *hw;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->rsrc_id = rsrc_id;
	clk->clk_type = clk_type;

	init.name = name;
	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72)
		init.ops = &clk_scu_cpu_ops;
	else if (rsrc_id == IMX_SC_R_PI_0_PLL)
		init.ops = &clk_scu_pi_ops;
	else
		init.ops = &clk_scu_ops;
	init.parent_names = parents;
	init.num_parents = num_parents;

	/*
	 * Note that on i.MX8 the clocks are tightly coupled with their
	 * power domain: once the power domain is off, the clock state may
	 * be lost. Mark the clock NOCACHE so users retrieve the real
	 * clock state from the HW instead of a possibly stale cached rate.
	 */
	init.flags = CLK_GET_RATE_NOCACHE;
	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(clk);
		return ERR_PTR(ret);
	}

	if (dev)
		dev_set_drvdata(dev, clk);

	return hw;
}

struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
				      void *data)
{
	unsigned int rsrc = clkspec->args[0];
	unsigned int idx = clkspec->args[1];
	struct list_head *scu_clks = data;
	struct imx_scu_clk_node *clk;

	list_for_each_entry(clk, &scu_clks[rsrc], node) {
		if (clk->clk_type == idx)
			return clk->hw;
	}

	return ERR_PTR(-ENODEV);
}

static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;
	int ret;

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	      (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_set_suspended(dev);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			pm_genpd_remove_device(dev);
			pm_runtime_disable(dev);
			return ret;
		}
	}

	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw)) {
		pm_runtime_disable(dev);
		return PTR_ERR(hw);
	}

	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	      (clk->rsrc == IMX_SC_R_A72)))
		pm_runtime_put_autosuspend(dev);

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}
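
/*
 * Example: with #clock-cells = <2>, a device tree consumer identifies a
 * clock by its (resource, clock type) pair, which imx_scu_of_clk_src_get()
 * resolves against imx_scu_clks[]. Illustrative DT snippet, assuming the
 * usual SCU clock-controller binding (node names and addresses are
 * placeholders):
 *
 *	serial@5a060000 {
 *		...
 *		clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>;
 *	};
 */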

static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	clk->parent = clk_hw_get_parent(&clk->hw);

	/* DC SS needs to handle bypass clock using non-cached clock rate */
	if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 ||
	    clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 ||
	    clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 ||
	    clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1)
		clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
	else
		clk->rate = clk_hw_get_rate(&clk->hw);
	clk->is_enabled = clk_hw_is_prepared(&clk->hw);

	if (clk->parent)
		dev_dbg(dev, "save parent %s idx %u\n",
			clk_hw_get_name(clk->parent), clk->parent_index);

	if (clk->rate)
		dev_dbg(dev, "save rate %u\n", clk->rate);

	if (clk->is_enabled)
		dev_dbg(dev, "save enabled state\n");

	return 0;
}

static int __maybe_unused imx_clk_scu_resume(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;
	int ret = 0;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	if (clk->parent) {
		ret = clk_scu_set_parent(&clk->hw, clk->parent_index);
		dev_dbg(dev, "restore parent %s idx %u %s\n",
			clk_hw_get_name(clk->parent),
			clk->parent_index, !ret ? "success" : "failed");
	}

	if (clk->rate) {
		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
		dev_dbg(dev, "restore rate %u %s\n", clk->rate,
			!ret ? "success" : "failed");
	}

	if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) {
		ret = clk_scu_prepare(&clk->hw);
		dev_dbg(dev, "restore enabled state %s\n",
			!ret ? "success" : "failed");
	}

	return ret;
}

static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};

static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
	struct of_phandle_args genpdspec = {
		.np = pd_np,
		.args_count = 1,
		.args[0] = rsrc_id,
	};

	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
	    rsrc_id == IMX_SC_R_A72)
		return 0;

	return of_genpd_add_device(&genpdspec, dev);
}
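
/*
 * Example: the one-cell genpd specifier built above matches the
 * "fsl,scu-pd" provider, where args[0] is the same SCU resource ID used
 * for the clock. Attaching each clock's virtual device to its domain
 * lets runtime PM power the resource up before the clock RPCs touch it.
 * A device node would reference the same domain as (illustrative
 * snippet):
 *
 *	power-domains = <&pd IMX_SC_R_UART_0>;
 */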

static bool imx_clk_is_resource_owned(u32 rsrc)
{
	/*
	 * A-core resources are special. SCFW reports they are not "owned"
	 * by the current partition, but Linux can still adjust them for
	 * cpufreq.
	 */
	if (rsrc == IMX_SC_R_A53 || rsrc == IMX_SC_R_A72 || rsrc == IMX_SC_R_A35)
		return true;

	return imx_sc_rm_is_resource_owned(ccm_ipc_handle, rsrc);
}

struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	if (!imx_clk_is_resource_owned(rsrc_id))
		return NULL;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret)
		goto put_device;

	ret = driver_set_override(&pdev->dev, &pdev->driver_override,
				  "imx-scu-clk", strlen("imx-scu-clk"));
	if (ret)
		goto put_device;

	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attach the power domain %d\n",
			name, ret);

	ret = platform_device_add(pdev);
	if (ret)
		goto put_device;

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;

put_device:
	platform_device_put(pdev);
	return ERR_PTR(ret);
}

void imx_clk_scu_unregister(void)
{
	struct imx_scu_clk_node *clk, *n;
	int i;

	for (i = 0; i < IMX_SC_R_LAST; i++) {
		list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
			clk_hw_unregister(clk->hw);
			kfree(clk);
		}
	}
}

static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	unsigned long rate = 0;
	u32 val;
	int err;

	err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, &val);

	rate = val ? parent_rate / 2 : parent_rate;

	return err ? 0 : rate;
}

static int clk_gpr_div_scu_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
{
	if (req->rate < req->best_parent_rate)
		req->rate = req->best_parent_rate / 2;
	else
		req->rate = req->best_parent_rate;

	return 0;
}
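
/*
 * Worked example: the GPR divider selects only /1 or /2, so
 * clk_gpr_div_scu_determine_rate() rounds any request below the parent
 * rate down to half of it:
 *
 *	parent 400 MHz, request 400 MHz -> 400 MHz (control bit 0)
 *	parent 400 MHz, request 399 MHz -> 200 MHz (control bit 1)
 *	parent 400 MHz, request 100 MHz -> 200 MHz (control bit 1)
 */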

static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	u32 val;
	int err;

	val = (rate < parent_rate) ? 1 : 0;
	err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, val);

	return err ? -EINVAL : 0;
}

static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.determine_rate = clk_gpr_div_scu_determine_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};

static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	u32 val = 0;

	imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				clk->gpr_id, &val);

	return (u8)val;
}

static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, index);
}

static const struct clk_ops clk_gpr_mux_scu_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};

static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, !clk->gate_invert);
}

static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	int ret;

	ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, clk->gate_invert);
	if (ret)
		pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
		       ret);
}
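
/*
 * Note: @gate_invert selects the polarity of the GPR gate control. With
 * gate_invert == false the bit is active-high (1 = running); with
 * gate_invert == true it is active-low:
 *
 *	prepare():   set_control(..., !gate_invert)   i.e. 1, or 0 if inverted
 *	unprepare(): set_control(..., gate_invert)    i.e. 0, or 1 if inverted
 */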

static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	int ret;
	u32 val;

	ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, &val);
	if (ret)
		return ret;

	return clk->gate_invert ? !val : val;
}

static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};

struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
				 int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
				 bool invert)
{
	struct imx_scu_clk_node *clk_node;
	struct clk_gpr_scu *clk;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
		return ERR_PTR(-EINVAL);

	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	if (!imx_clk_is_resource_owned(rsrc_id))
		return NULL;

	clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
	if (!clk_node)
		return ERR_PTR(-ENOMEM);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		kfree(clk_node);
		return ERR_PTR(-ENOMEM);
	}

	clk->rsrc_id = rsrc_id;
	clk->gpr_id = gpr_id;
	clk->flags = flags;
	clk->gate_invert = invert;

	/* callers must pass exactly one of the GATE/DIV/MUX flags */
	if (flags & IMX_SCU_GPR_CLK_GATE)
		init.ops = &clk_gpr_gate_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_DIV)
		init.ops = &clk_gpr_div_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_MUX)
		init.ops = &clk_gpr_mux_scu_ops;

	init.flags = 0;
	init.name = name;
	init.parent_names = parent_name;
	init.num_parents = num_parents;

	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(clk);
		kfree(clk_node);
		hw = ERR_PTR(ret);
	} else {
		clk_node->hw = hw;
		clk_node->clk_type = gpr_id;
		list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
	}

	return hw;
}
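
/*
 * Example (hypothetical caller): a SoC clock driver registering an
 * active-low GPR gate with a single parent. The clock name, parent name
 * and resource/control IDs below are illustrative only:
 *
 *	const char *parent = "enet0_ref_div";
 *
 *	hw = __imx_clk_gpr_scu("enet0_ref_gate", &parent, 1,
 *			       IMX_SC_R_ENET_0, IMX_SC_C_DISABLE_50,
 *			       IMX_SCU_GPR_CLK_GATE, true);
 */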