xref: /linux/drivers/clk/imx/clk-scu.c (revision 5392c5de096a1cad7cc06265a8cbf18de2da22c7)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2018 NXP
4  *   Dong Aisheng <aisheng.dong@nxp.com>
5  */
6 
7 #include <dt-bindings/firmware/imx/rsrc.h>
8 #include <linux/arm-smccc.h>
9 #include <linux/clk-provider.h>
10 #include <linux/err.h>
11 #include <linux/of_platform.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/slab.h>
16 
17 #include "clk-scu.h"
18 
/* ARM SiP service identifiers used to ask ATF to scale the CPU frequency */
#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

/* IPC handle to the SCU firmware, obtained in imx_clk_scu_init() */
static struct imx_sc_ipc *ccm_ipc_handle;
/* "fsl,scu-pd" node used to attach clock devices to their power domains */
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
25 
/*
 * struct imx_scu_clk_node - platform data for a dynamically created SCU clk
 * @name: clock name, also used as the platform device name
 * @rsrc: SCU resource ID this clock belongs to
 * @clk_type: clock type within the resource
 * @parents: parent clock names, NULL for a root clock
 * @num_parents: number of entries in @parents
 * @hw: the registered clk_hw, filled in at probe time
 * @node: entry in the per-resource imx_scu_clks[] list
 */
struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

/* one clock list per SCU resource, indexed by resource ID */
struct list_head imx_scu_clks[IMX_SC_R_LAST];
38 
/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 * @is_enabled: enabled state saved at suspend, replayed at resume
 * @rate: clock rate saved at suspend, replayed at resume
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore */
	bool is_enabled;
	u32 rate;
};
54 
/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 * @flags: IMX_SCU_GPR_CLK_* flag selecting which clk_ops are used
 * @gate_invert: when true the gate control is active-low
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

/* retrieve the clk_gpr_scu wrapper from its embedded clk_hw */
#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)
70 
/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol of clock rate set
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* request payload for IMX_SC_PM_FUNC_GET_CLOCK_RATE */
struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* response payload for IMX_SC_PM_FUNC_GET_CLOCK_RATE */
struct resp_get_clock_rate {
	__le32 rate;
};
95 
/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol of clock rate get.
 * Request and response share the same payload storage, so the
 * response overwrites the request once the RPC completes.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};
111 
/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol of clock get parent.
 * Request and response share the same payload storage.
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};
132 
/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: parent index to select
 *
 * This structure describes the SCU protocol of clock set parent
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;
146 
/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: true to enable (ungate) the clock, false to gate it off
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol of clock gate
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);
164 
165 static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
166 {
167 	return container_of(hw, struct clk_scu, hw);
168 }
169 
170 int imx_clk_scu_init(struct device_node *np)
171 {
172 	u32 clk_cells;
173 	int ret, i;
174 
175 	ret = imx_scu_get_handle(&ccm_ipc_handle);
176 	if (ret)
177 		return ret;
178 
179 	of_property_read_u32(np, "#clock-cells", &clk_cells);
180 
181 	if (clk_cells == 2) {
182 		for (i = 0; i < IMX_SC_R_LAST; i++)
183 			INIT_LIST_HEAD(&imx_scu_clks[i]);
184 
185 		/* pd_np will be used to attach power domains later */
186 		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
187 		if (!pd_np)
188 			return -EINVAL;
189 	}
190 
191 	return platform_driver_register(&imx_clk_scu_driver);
192 }
193 
194 /*
195  * clk_scu_recalc_rate - Get clock rate for a SCU clock
196  * @hw: clock to get rate for
197  * @parent_rate: parent rate provided by common clock framework, not used
198  *
199  * Gets the current clock rate of a SCU clock. Returns the current
200  * clock rate, or zero in failure.
201  */
202 static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
203 					 unsigned long parent_rate)
204 {
205 	struct clk_scu *clk = to_clk_scu(hw);
206 	struct imx_sc_msg_get_clock_rate msg;
207 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
208 	int ret;
209 
210 	hdr->ver = IMX_SC_RPC_VERSION;
211 	hdr->svc = IMX_SC_RPC_SVC_PM;
212 	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
213 	hdr->size = 2;
214 
215 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
216 	msg.data.req.clk = clk->clk_type;
217 
218 	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
219 	if (ret) {
220 		pr_err("%s: failed to get clock rate %d\n",
221 		       clk_hw_get_name(hw), ret);
222 		return 0;
223 	}
224 
225 	return le32_to_cpu(msg.data.resp.rate);
226 }
227 
/*
 * clk_scu_round_rate - Round clock rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Returns the requested rate unchanged; the SCU firmware performs
 * the real rounding when the rate is actually set.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* accept every request and defer rounding to the SCU firmware */
	return rate;
}
245 
246 static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
247 				    unsigned long parent_rate)
248 {
249 	struct clk_scu *clk = to_clk_scu(hw);
250 	struct arm_smccc_res res;
251 	unsigned long cluster_id;
252 
253 	if (clk->rsrc_id == IMX_SC_R_A35)
254 		cluster_id = 0;
255 	else
256 		return -EINVAL;
257 
258 	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
259 	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
260 		      cluster_id, rate, 0, 0, 0, 0, &res);
261 
262 	return 0;
263 }
264 
265 /*
266  * clk_scu_set_rate - Set rate for a SCU clock
267  * @hw: clock to change rate for
268  * @rate: target rate for the clock
269  * @parent_rate: rate of the clock parent, not used for SCU clocks
270  *
271  * Sets a clock frequency for a SCU clock. Returns the SCU
272  * protocol status.
273  */
274 static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
275 			    unsigned long parent_rate)
276 {
277 	struct clk_scu *clk = to_clk_scu(hw);
278 	struct imx_sc_msg_req_set_clock_rate msg;
279 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
280 
281 	hdr->ver = IMX_SC_RPC_VERSION;
282 	hdr->svc = IMX_SC_RPC_SVC_PM;
283 	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
284 	hdr->size = 3;
285 
286 	msg.rate = cpu_to_le32(rate);
287 	msg.resource = cpu_to_le16(clk->rsrc_id);
288 	msg.clk = clk->clk_type;
289 
290 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
291 }
292 
293 static u8 clk_scu_get_parent(struct clk_hw *hw)
294 {
295 	struct clk_scu *clk = to_clk_scu(hw);
296 	struct imx_sc_msg_get_clock_parent msg;
297 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
298 	int ret;
299 
300 	hdr->ver = IMX_SC_RPC_VERSION;
301 	hdr->svc = IMX_SC_RPC_SVC_PM;
302 	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
303 	hdr->size = 2;
304 
305 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
306 	msg.data.req.clk = clk->clk_type;
307 
308 	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
309 	if (ret) {
310 		pr_err("%s: failed to get clock parent %d\n",
311 		       clk_hw_get_name(hw), ret);
312 		return 0;
313 	}
314 
315 	return msg.data.resp.parent;
316 }
317 
318 static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
319 {
320 	struct clk_scu *clk = to_clk_scu(hw);
321 	struct imx_sc_msg_set_clock_parent msg;
322 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
323 
324 	hdr->ver = IMX_SC_RPC_VERSION;
325 	hdr->svc = IMX_SC_RPC_SVC_PM;
326 	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
327 	hdr->size = 2;
328 
329 	msg.resource = cpu_to_le16(clk->rsrc_id);
330 	msg.clk = clk->clk_type;
331 	msg.parent = index;
332 
333 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
334 }
335 
336 static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
337 			      u8 clk, bool enable, bool autog)
338 {
339 	struct imx_sc_msg_req_clock_enable msg;
340 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
341 
342 	hdr->ver = IMX_SC_RPC_VERSION;
343 	hdr->svc = IMX_SC_RPC_SVC_PM;
344 	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
345 	hdr->size = 3;
346 
347 	msg.resource = cpu_to_le16(resource);
348 	msg.clk = clk;
349 	msg.enable = enable;
350 	msg.autog = autog;
351 
352 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
353 }
354 
355 /*
356  * clk_scu_prepare - Enable a SCU clock
357  * @hw: clock to enable
358  *
359  * Enable the clock at the DSC slice level
360  */
361 static int clk_scu_prepare(struct clk_hw *hw)
362 {
363 	struct clk_scu *clk = to_clk_scu(hw);
364 
365 	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
366 				  clk->clk_type, true, false);
367 }
368 
369 /*
370  * clk_scu_unprepare - Disable a SCU clock
371  * @hw: clock to enable
372  *
373  * Disable the clock at the DSC slice level
374  */
375 static void clk_scu_unprepare(struct clk_hw *hw)
376 {
377 	struct clk_scu *clk = to_clk_scu(hw);
378 	int ret;
379 
380 	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
381 				 clk->clk_type, false, false);
382 	if (ret)
383 		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
384 			ret);
385 }
386 
/* regular SCU clock: rate, parent and gate control all go through SCU RPC */
static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* CPU clock: rate changes are routed through ATF (SMC); no parent ops */
static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};
404 
405 struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
406 			     const char * const *parents, int num_parents,
407 			     u32 rsrc_id, u8 clk_type)
408 {
409 	struct clk_init_data init;
410 	struct clk_scu *clk;
411 	struct clk_hw *hw;
412 	int ret;
413 
414 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
415 	if (!clk)
416 		return ERR_PTR(-ENOMEM);
417 
418 	clk->rsrc_id = rsrc_id;
419 	clk->clk_type = clk_type;
420 
421 	init.name = name;
422 	init.ops = &clk_scu_ops;
423 	if (rsrc_id == IMX_SC_R_A35)
424 		init.ops = &clk_scu_cpu_ops;
425 	else
426 		init.ops = &clk_scu_ops;
427 	init.parent_names = parents;
428 	init.num_parents = num_parents;
429 
430 	/*
431 	 * Note on MX8, the clocks are tightly coupled with power domain
432 	 * that once the power domain is off, the clock status may be
433 	 * lost. So we make it NOCACHE to let user to retrieve the real
434 	 * clock status from HW instead of using the possible invalid
435 	 * cached rate.
436 	 */
437 	init.flags = CLK_GET_RATE_NOCACHE;
438 	clk->hw.init = &init;
439 
440 	hw = &clk->hw;
441 	ret = clk_hw_register(dev, hw);
442 	if (ret) {
443 		kfree(clk);
444 		hw = ERR_PTR(ret);
445 		return hw;
446 	}
447 
448 	if (dev)
449 		dev_set_drvdata(dev, clk);
450 
451 	return hw;
452 }
453 
454 struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
455 				      void *data)
456 {
457 	unsigned int rsrc = clkspec->args[0];
458 	unsigned int idx = clkspec->args[1];
459 	struct list_head *scu_clks = data;
460 	struct imx_scu_clk_node *clk;
461 
462 	list_for_each_entry(clk, &scu_clks[rsrc], node) {
463 		if (clk->clk_type == idx)
464 			return clk->hw;
465 	}
466 
467 	return ERR_PTR(-ENODEV);
468 }
469 
470 static int imx_clk_scu_probe(struct platform_device *pdev)
471 {
472 	struct device *dev = &pdev->dev;
473 	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
474 	struct clk_hw *hw;
475 	int ret;
476 
477 	pm_runtime_set_suspended(dev);
478 	pm_runtime_set_autosuspend_delay(dev, 50);
479 	pm_runtime_use_autosuspend(&pdev->dev);
480 	pm_runtime_enable(dev);
481 
482 	ret = pm_runtime_get_sync(dev);
483 	if (ret) {
484 		pm_runtime_disable(dev);
485 		return ret;
486 	}
487 
488 	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
489 			   clk->rsrc, clk->clk_type);
490 	if (IS_ERR(hw)) {
491 		pm_runtime_disable(dev);
492 		return PTR_ERR(hw);
493 	}
494 
495 	clk->hw = hw;
496 	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
497 
498 	pm_runtime_mark_last_busy(&pdev->dev);
499 	pm_runtime_put_autosuspend(&pdev->dev);
500 
501 	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
502 		clk->clk_type);
503 
504 	return 0;
505 }
506 
507 static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
508 {
509 	struct clk_scu *clk = dev_get_drvdata(dev);
510 
511 	clk->rate = clk_hw_get_rate(&clk->hw);
512 	clk->is_enabled = clk_hw_is_enabled(&clk->hw);
513 
514 	if (clk->rate)
515 		dev_dbg(dev, "save rate %d\n", clk->rate);
516 
517 	if (clk->is_enabled)
518 		dev_dbg(dev, "save enabled state\n");
519 
520 	return 0;
521 }
522 
523 static int __maybe_unused imx_clk_scu_resume(struct device *dev)
524 {
525 	struct clk_scu *clk = dev_get_drvdata(dev);
526 	int ret = 0;
527 
528 	if (clk->rate) {
529 		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
530 		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
531 			!ret ? "success" : "failed");
532 	}
533 
534 	if (clk->is_enabled) {
535 		ret = clk_scu_prepare(&clk->hw);
536 		dev_dbg(dev, "restore enabled state %s\n",
537 			!ret ? "success" : "failed");
538 	}
539 
540 	return ret;
541 }
542 
/* save/restore SCU clock state around system sleep (noirq phase) */
static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

/* driver bound to the devices created by imx_clk_scu_alloc_dev() */
static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};
556 
557 static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
558 {
559 	struct of_phandle_args genpdspec = {
560 		.np = pd_np,
561 		.args_count = 1,
562 		.args[0] = rsrc_id,
563 	};
564 
565 	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
566 	    rsrc_id == IMX_SC_R_A72)
567 		return 0;
568 
569 	return of_genpd_add_device(&genpdspec, dev);
570 }
571 
572 struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
573 				     const char * const *parents,
574 				     int num_parents, u32 rsrc_id, u8 clk_type)
575 {
576 	struct imx_scu_clk_node clk = {
577 		.name = name,
578 		.rsrc = rsrc_id,
579 		.clk_type = clk_type,
580 		.parents = parents,
581 		.num_parents = num_parents,
582 	};
583 	struct platform_device *pdev;
584 	int ret;
585 
586 	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
587 	if (!pdev) {
588 		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
589 		       name, rsrc_id, clk_type);
590 		return ERR_PTR(-ENOMEM);
591 	}
592 
593 	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
594 	if (ret) {
595 		platform_device_put(pdev);
596 		return ERR_PTR(ret);
597 	}
598 
599 	pdev->driver_override = "imx-scu-clk";
600 
601 	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
602 	if (ret)
603 		pr_warn("%s: failed to attached the power domain %d\n",
604 			name, ret);
605 
606 	platform_device_add(pdev);
607 
608 	/* For API backwards compatiblilty, simply return NULL for success */
609 	return NULL;
610 }
611 
612 void imx_clk_scu_unregister(void)
613 {
614 	struct imx_scu_clk_node *clk;
615 	int i;
616 
617 	for (i = 0; i < IMX_SC_R_LAST; i++) {
618 		list_for_each_entry(clk, &imx_scu_clks[i], node) {
619 			clk_hw_unregister(clk->hw);
620 			kfree(clk);
621 		}
622 	}
623 }
624 
625 static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
626 						 unsigned long parent_rate)
627 {
628 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
629 	unsigned long rate = 0;
630 	u32 val;
631 	int err;
632 
633 	err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
634 				      clk->gpr_id, &val);
635 
636 	rate  = val ? parent_rate / 2 : parent_rate;
637 
638 	return err ? 0 : rate;
639 }
640 
/*
 * clk_gpr_div_scu_round_rate - round to what the 1-bit divider can do
 * @hw: clock to round rate for
 * @rate: requested rate
 * @prate: parent rate
 *
 * Only two rates exist: the parent rate, or half of it for any
 * request below the parent rate.
 */
static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	return (rate < *prate) ? *prate / 2 : *prate;
}
651 
652 static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
653 				    unsigned long parent_rate)
654 {
655 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
656 	uint32_t val;
657 	int err;
658 
659 	val = (rate < parent_rate) ? 1 : 0;
660 	err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
661 				      clk->gpr_id, val);
662 
663 	return err ? -EINVAL : 0;
664 }
665 
/* GPR-bit controlled divide-by-2 clock: rate ops only */
static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.round_rate = clk_gpr_div_scu_round_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};
671 
672 static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
673 {
674 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
675 	u32 val = 0;
676 
677 	imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
678 				clk->gpr_id, &val);
679 
680 	return (u8)val;
681 }
682 
683 static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
684 {
685 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
686 
687 	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
688 				       clk->gpr_id, index);
689 }
690 
/* GPR-field controlled mux clock: parent ops only */
static const struct clk_ops clk_gpr_mux_scu_ops = {
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};
695 
696 static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
697 {
698 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
699 
700 	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
701 				       clk->gpr_id, !clk->gate_invert);
702 }
703 
704 static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
705 {
706 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
707 	int ret;
708 
709 	ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
710 				      clk->gpr_id, clk->gate_invert);
711 	if (ret)
712 		pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
713 		       ret);
714 }
715 
716 static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
717 {
718 	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
719 	int ret;
720 	u32 val;
721 
722 	ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
723 				      clk->gpr_id, &val);
724 	if (ret)
725 		return ret;
726 
727 	return clk->gate_invert ? !val : val;
728 }
729 
/* GPR-bit controlled gate clock: prepare/unprepare ops only */
static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};
735 
736 struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
737 				 int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
738 				 bool invert)
739 {
740 	struct imx_scu_clk_node *clk_node;
741 	struct clk_gpr_scu *clk;
742 	struct clk_hw *hw;
743 	struct clk_init_data init;
744 	int ret;
745 
746 	if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
747 		return ERR_PTR(-EINVAL);
748 
749 	clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
750 	if (!clk_node)
751 		return ERR_PTR(-ENOMEM);
752 
753 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
754 	if (!clk) {
755 		kfree(clk_node);
756 		return ERR_PTR(-ENOMEM);
757 	}
758 
759 	clk->rsrc_id = rsrc_id;
760 	clk->gpr_id = gpr_id;
761 	clk->flags = flags;
762 	clk->gate_invert = invert;
763 
764 	if (flags & IMX_SCU_GPR_CLK_GATE)
765 		init.ops = &clk_gpr_gate_scu_ops;
766 
767 	if (flags & IMX_SCU_GPR_CLK_DIV)
768 		init.ops = &clk_gpr_div_scu_ops;
769 
770 	if (flags & IMX_SCU_GPR_CLK_MUX)
771 		init.ops = &clk_gpr_mux_scu_ops;
772 
773 	init.flags = 0;
774 	init.name = name;
775 	init.parent_names = parent_name;
776 	init.num_parents = num_parents;
777 
778 	clk->hw.init = &init;
779 
780 	hw = &clk->hw;
781 	ret = clk_hw_register(NULL, hw);
782 	if (ret) {
783 		kfree(clk);
784 		kfree(clk_node);
785 		hw = ERR_PTR(ret);
786 	} else {
787 		clk_node->hw = hw;
788 		clk_node->clk_type = gpr_id;
789 		list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
790 	}
791 
792 	return hw;
793 }
794