xref: /linux/drivers/power/sequencing/pwrseq-thead-gpu.c (revision ffec878fa5fba8c527cbbb006b0522ae0d6599ce)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * T-HEAD TH1520 GPU Power Sequencer Driver
4  *
5  * Copyright (c) 2025 Samsung Electronics Co., Ltd.
6  * Author: Michal Wilczynski <m.wilczynski@samsung.com>
7  *
8  * This driver implements the power sequence for the Imagination BXM-4-64
9  * GPU on the T-HEAD TH1520 SoC. The sequence requires coordinating resources
10  * from both the sequencer's parent device node (clkgen_reset) and the GPU's
11  * device node (clocks and core reset).
12  *
13  * The `match` function is used to acquire the GPU's resources when the
14  * GPU driver requests the "gpu-power" sequence target.
15  */
16 
17 #include <linux/auxiliary_bus.h>
18 #include <linux/clk.h>
19 #include <linux/delay.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/pwrseq/provider.h>
23 #include <linux/reset.h>
24 #include <linux/slab.h>
25 
26 #include <dt-bindings/power/thead,th1520-power.h>
27 
/**
 * struct pwrseq_thead_gpu_ctx - State for one TH1520 GPU power sequencer
 * @pwrseq: power sequencer device registered in probe
 * @clkgen_reset: "gpu-clkgen" reset line, owned by the sequencer's parent
 *                (AON) device; acquired with devm in probe
 * @aon_node: OF node of the parent (AON) device, used by match() to verify
 *            that a candidate consumer's power-domain phandle points here
 * @consumer_node: OF node of the bound GPU consumer; NULL until a consumer
 *                 matches, then held (of_node_get) until remove
 * @clks: bulk clock handles ("core", "sys") taken from the consumer's node
 *        in match(); NULL until a consumer is bound
 * @num_clks: number of entries in @clks
 * @gpu_reset: GPU core reset taken from the consumer's node in match()
 */
struct pwrseq_thead_gpu_ctx {
	struct pwrseq_device *pwrseq;
	struct reset_control *clkgen_reset;
	struct device_node *aon_node;

	/* Consumer resources, populated lazily by pwrseq_thead_gpu_match() */
	struct device_node *consumer_node;
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *gpu_reset;
};
39 
pwrseq_thead_gpu_enable(struct pwrseq_device * pwrseq)40 static int pwrseq_thead_gpu_enable(struct pwrseq_device *pwrseq)
41 {
42 	struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
43 	int ret;
44 
45 	if (!ctx->clks || !ctx->gpu_reset)
46 		return -ENODEV;
47 
48 	ret = clk_bulk_prepare_enable(ctx->num_clks, ctx->clks);
49 	if (ret)
50 		return ret;
51 
52 	ret = reset_control_deassert(ctx->clkgen_reset);
53 	if (ret)
54 		goto err_disable_clks;
55 
56 	/*
57 	 * According to the hardware manual, a delay of at least 32 clock
58 	 * cycles is required between de-asserting the clkgen reset and
59 	 * de-asserting the GPU reset. Assuming a worst-case scenario with
60 	 * a very high GPU clock frequency, a delay of 1 microsecond is
61 	 * sufficient to ensure this requirement is met across all
62 	 * feasible GPU clock speeds.
63 	 */
64 	udelay(1);
65 
66 	ret = reset_control_deassert(ctx->gpu_reset);
67 	if (ret)
68 		goto err_assert_clkgen;
69 
70 	return 0;
71 
72 err_assert_clkgen:
73 	reset_control_assert(ctx->clkgen_reset);
74 err_disable_clks:
75 	clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks);
76 	return ret;
77 }
78 
pwrseq_thead_gpu_disable(struct pwrseq_device * pwrseq)79 static int pwrseq_thead_gpu_disable(struct pwrseq_device *pwrseq)
80 {
81 	struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
82 	int ret = 0, err;
83 
84 	if (!ctx->clks || !ctx->gpu_reset)
85 		return -ENODEV;
86 
87 	err = reset_control_assert(ctx->gpu_reset);
88 	if (err)
89 		ret = err;
90 
91 	err = reset_control_assert(ctx->clkgen_reset);
92 	if (err && !ret)
93 		ret = err;
94 
95 	clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks);
96 
97 	/* ret stores values of the first error code */
98 	return ret;
99 }
100 
/* The single power-sequence unit: pairs the enable/disable callbacks above. */
static const struct pwrseq_unit_data pwrseq_thead_gpu_unit = {
	.name = "gpu-power-sequence",
	.enable = pwrseq_thead_gpu_enable,
	.disable = pwrseq_thead_gpu_disable,
};
106 
/* "gpu-power" is the target name the GPU driver requests from pwrseq. */
static const struct pwrseq_target_data pwrseq_thead_gpu_target = {
	.name = "gpu-power",
	.unit = &pwrseq_thead_gpu_unit,
};
111 
/* NULL-terminated target list handed to the pwrseq core at registration. */
static const struct pwrseq_target_data *pwrseq_thead_gpu_targets[] = {
	&pwrseq_thead_gpu_target,
	NULL
};
116 
pwrseq_thead_gpu_match(struct pwrseq_device * pwrseq,struct device * dev)117 static int pwrseq_thead_gpu_match(struct pwrseq_device *pwrseq,
118 				  struct device *dev)
119 {
120 	struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
121 	static const char *const clk_names[] = { "core", "sys" };
122 	struct of_phandle_args pwr_spec;
123 	int i, ret;
124 
125 	/* We only match the specific T-HEAD TH1520 GPU compatible */
126 	if (!of_device_is_compatible(dev->of_node, "thead,th1520-gpu"))
127 		return PWRSEQ_NO_MATCH;
128 
129 	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
130 					 "#power-domain-cells", 0, &pwr_spec);
131 	if (ret)
132 		return PWRSEQ_NO_MATCH;
133 
134 	/* Additionally verify consumer device has AON as power-domain */
135 	if (pwr_spec.np != ctx->aon_node || pwr_spec.args[0] != TH1520_GPU_PD) {
136 		of_node_put(pwr_spec.np);
137 		return PWRSEQ_NO_MATCH;
138 	}
139 
140 	of_node_put(pwr_spec.np);
141 
142 	/* If a consumer is already bound, only allow a re-match from it */
143 	if (ctx->consumer_node)
144 		return ctx->consumer_node == dev->of_node ?
145 				PWRSEQ_MATCH_OK : PWRSEQ_NO_MATCH;
146 
147 	ctx->num_clks = ARRAY_SIZE(clk_names);
148 	ctx->clks = kcalloc(ctx->num_clks, sizeof(*ctx->clks), GFP_KERNEL);
149 	if (!ctx->clks)
150 		return -ENOMEM;
151 
152 	for (i = 0; i < ctx->num_clks; i++)
153 		ctx->clks[i].id = clk_names[i];
154 
155 	ret = clk_bulk_get(dev, ctx->num_clks, ctx->clks);
156 	if (ret)
157 		goto err_free_clks;
158 
159 	ctx->gpu_reset = reset_control_get_shared(dev, NULL);
160 	if (IS_ERR(ctx->gpu_reset)) {
161 		ret = PTR_ERR(ctx->gpu_reset);
162 		goto err_put_clks;
163 	}
164 
165 	ctx->consumer_node = of_node_get(dev->of_node);
166 
167 	return PWRSEQ_MATCH_OK;
168 
169 err_put_clks:
170 	clk_bulk_put(ctx->num_clks, ctx->clks);
171 err_free_clks:
172 	kfree(ctx->clks);
173 	ctx->clks = NULL;
174 
175 	return ret;
176 }
177 
pwrseq_thead_gpu_probe(struct auxiliary_device * adev,const struct auxiliary_device_id * id)178 static int pwrseq_thead_gpu_probe(struct auxiliary_device *adev,
179 				  const struct auxiliary_device_id *id)
180 {
181 	struct device *dev = &adev->dev;
182 	struct device *parent_dev = dev->parent;
183 	struct pwrseq_thead_gpu_ctx *ctx;
184 	struct pwrseq_config config = {};
185 
186 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
187 	if (!ctx)
188 		return -ENOMEM;
189 
190 	ctx->aon_node = parent_dev->of_node;
191 
192 	ctx->clkgen_reset =
193 		devm_reset_control_get_exclusive(parent_dev, "gpu-clkgen");
194 	if (IS_ERR(ctx->clkgen_reset))
195 		return dev_err_probe(
196 			dev, PTR_ERR(ctx->clkgen_reset),
197 			"Failed to get GPU clkgen reset from parent\n");
198 
199 	config.parent = dev;
200 	config.owner = THIS_MODULE;
201 	config.drvdata = ctx;
202 	config.match = pwrseq_thead_gpu_match;
203 	config.targets = pwrseq_thead_gpu_targets;
204 
205 	ctx->pwrseq = devm_pwrseq_device_register(dev, &config);
206 	if (IS_ERR(ctx->pwrseq))
207 		return dev_err_probe(dev, PTR_ERR(ctx->pwrseq),
208 				     "Failed to register power sequencer\n");
209 
210 	auxiliary_set_drvdata(adev, ctx);
211 
212 	return 0;
213 }
214 
pwrseq_thead_gpu_remove(struct auxiliary_device * adev)215 static void pwrseq_thead_gpu_remove(struct auxiliary_device *adev)
216 {
217 	struct pwrseq_thead_gpu_ctx *ctx = auxiliary_get_drvdata(adev);
218 
219 	if (ctx->gpu_reset)
220 		reset_control_put(ctx->gpu_reset);
221 
222 	if (ctx->clks) {
223 		clk_bulk_put(ctx->num_clks, ctx->clks);
224 		kfree(ctx->clks);
225 	}
226 
227 	if (ctx->consumer_node)
228 		of_node_put(ctx->consumer_node);
229 }
230 
/* Matches the auxiliary device created by the th1520_pm_domains driver. */
static const struct auxiliary_device_id pwrseq_thead_gpu_id_table[] = {
	{ .name = "th1520_pm_domains.pwrseq-gpu" },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, pwrseq_thead_gpu_id_table);
236 
/* Auxiliary driver glue and module metadata. */
static struct auxiliary_driver pwrseq_thead_gpu_driver = {
	.driver = {
		.name = "pwrseq-thead-gpu",
	},
	.probe = pwrseq_thead_gpu_probe,
	.remove = pwrseq_thead_gpu_remove,
	.id_table = pwrseq_thead_gpu_id_table,
};
module_auxiliary_driver(pwrseq_thead_gpu_driver);

MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
MODULE_DESCRIPTION("T-HEAD TH1520 GPU power sequencer driver");
MODULE_LICENSE("GPL");
250