xref: /linux/drivers/net/ipa/ipa_power.c (revision f2ec98566775dd4341ec1dcf93aa5859c60de826)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2018-2022 Linaro Ltd.
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/device.h>
9 #include <linux/interconnect.h>
10 #include <linux/pm.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/bitops.h>
13 
14 #include "linux/soc/qcom/qcom_aoss.h"
15 
16 #include "ipa.h"
17 #include "ipa_power.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_modem.h"
20 #include "ipa_data.h"
21 
22 /**
23  * DOC: IPA Power Management
24  *
25  * The IPA hardware is enabled when the IPA core clock and all the
26  * interconnects (buses) it depends on are enabled.  Runtime power
27  * management is used to determine whether the core clock and
28  * interconnects are enabled, and if not in use to be suspended
29  * automatically.
30  *
31  * The core clock currently runs at a fixed clock rate when enabled,
 * and all interconnects use a fixed average and peak bandwidth.
33  */
34 
35 #define IPA_AUTOSUSPEND_DELAY	500	/* milliseconds */
36 
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 *
 * These index into the bitmap in &struct ipa_power and are manipulated
 * with the non-atomic __set_bit()/__clear_bit() bit operations.
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
48 
/**
 * struct ipa_power - IPA power management information
 * @dev:		IPA device pointer
 * @core:		IPA core clock
 * @qmp:		QMP handle for AOSS communication; NULL if the
 *			platform doesn't define/need register retention
 * @flags:		Boolean state flags (indexed by enum ipa_power_flag)
 * @interconnect_count:	Number of elements in interconnect[]
 * @interconnect:	Interconnect array (flexible array member, sized
 *			by @interconnect_count at allocation time)
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
66 
67 /* Initialize interconnects required for IPA operation */
68 static int ipa_interconnect_init(struct ipa_power *power,
69 				 const struct ipa_interconnect_data *data)
70 {
71 	struct icc_bulk_data *interconnect;
72 	int ret;
73 	u32 i;
74 
75 	/* Initialize our interconnect data array for bulk operations */
76 	interconnect = &power->interconnect[0];
77 	for (i = 0; i < power->interconnect_count; i++) {
78 		/* interconnect->path is filled in by of_icc_bulk_get() */
79 		interconnect->name = data->name;
80 		interconnect->avg_bw = data->average_bandwidth;
81 		interconnect->peak_bw = data->peak_bandwidth;
82 		data++;
83 		interconnect++;
84 	}
85 
86 	ret = of_icc_bulk_get(power->dev, power->interconnect_count,
87 			      power->interconnect);
88 	if (ret)
89 		return ret;
90 
91 	/* All interconnects are initially disabled */
92 	icc_bulk_disable(power->interconnect_count, power->interconnect);
93 
94 	/* Set the bandwidth values to be used when enabled */
95 	ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
96 	if (ret)
97 		icc_bulk_put(power->interconnect_count, power->interconnect);
98 
99 	return ret;
100 }
101 
102 /* Inverse of ipa_interconnect_init() */
103 static void ipa_interconnect_exit(struct ipa_power *power)
104 {
105 	icc_bulk_put(power->interconnect_count, power->interconnect);
106 }
107 
108 /* Enable IPA power, enabling interconnects and the core clock */
109 static int ipa_power_enable(struct ipa *ipa)
110 {
111 	struct ipa_power *power = ipa->power;
112 	int ret;
113 
114 	ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
115 	if (ret)
116 		return ret;
117 
118 	ret = clk_prepare_enable(power->core);
119 	if (ret) {
120 		dev_err(power->dev, "error %d enabling core clock\n", ret);
121 		icc_bulk_disable(power->interconnect_count,
122 				 power->interconnect);
123 	}
124 
125 	return ret;
126 }
127 
128 /* Inverse of ipa_power_enable() */
129 static void ipa_power_disable(struct ipa *ipa)
130 {
131 	struct ipa_power *power = ipa->power;
132 
133 	clk_disable_unprepare(power->core);
134 
135 	icc_bulk_disable(power->interconnect_count, power->interconnect);
136 }
137 
138 static int ipa_runtime_suspend(struct device *dev)
139 {
140 	struct ipa *ipa = dev_get_drvdata(dev);
141 
142 	/* Endpoints aren't usable until setup is complete */
143 	if (ipa->setup_complete) {
144 		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
145 		ipa_endpoint_suspend(ipa);
146 		gsi_suspend(&ipa->gsi);
147 	}
148 
149 	ipa_power_disable(ipa);
150 
151 	return 0;
152 }
153 
154 static int ipa_runtime_resume(struct device *dev)
155 {
156 	struct ipa *ipa = dev_get_drvdata(dev);
157 	int ret;
158 
159 	ret = ipa_power_enable(ipa);
160 	if (WARN_ON(ret < 0))
161 		return ret;
162 
163 	/* Endpoints aren't usable until setup is complete */
164 	if (ipa->setup_complete) {
165 		gsi_resume(&ipa->gsi);
166 		ipa_endpoint_resume(ipa);
167 	}
168 
169 	return 0;
170 }
171 
172 static int ipa_suspend(struct device *dev)
173 {
174 	struct ipa *ipa = dev_get_drvdata(dev);
175 
176 	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
177 
178 	/* Increment the disable depth to ensure that the IRQ won't
179 	 * be re-enabled until the matching _enable call in
180 	 * ipa_resume(). We do this to ensure that the interrupt
181 	 * handler won't run whilst PM runtime is disabled.
182 	 *
183 	 * Note that disabling the IRQ is NOT the same as disabling
184 	 * irq wake. If wakeup is enabled for the IPA then the IRQ
185 	 * will still cause the system to wake up, see irq_set_irq_wake().
186 	 */
187 	ipa_interrupt_irq_disable(ipa);
188 
189 	return pm_runtime_force_suspend(dev);
190 }
191 
192 static int ipa_resume(struct device *dev)
193 {
194 	struct ipa *ipa = dev_get_drvdata(dev);
195 	int ret;
196 
197 	ret = pm_runtime_force_resume(dev);
198 
199 	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
200 
201 	/* Now that PM runtime is enabled again it's safe
202 	 * to turn the IRQ back on and process any data
203 	 * that was received during suspend.
204 	 */
205 	ipa_interrupt_irq_enable(ipa);
206 
207 	return ret;
208 }
209 
210 /* Return the current IPA core clock rate */
211 u32 ipa_core_clock_rate(struct ipa *ipa)
212 {
213 	return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
214 }
215 
216 void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
217 {
218 	/* To handle an IPA interrupt we will have resumed the hardware
219 	 * just to handle the interrupt, so we're done.  If we are in a
220 	 * system suspend, trigger a system resume.
221 	 */
222 	if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
223 		if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
224 			pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
225 
226 	/* Acknowledge/clear the suspend interrupt on all endpoints */
227 	ipa_interrupt_suspend_clear_all(ipa->interrupt);
228 }
229 
230 static int ipa_power_retention_init(struct ipa_power *power)
231 {
232 	struct qmp *qmp = qmp_get(power->dev);
233 
234 	if (IS_ERR(qmp)) {
235 		if (PTR_ERR(qmp) == -EPROBE_DEFER)
236 			return -EPROBE_DEFER;
237 
238 		/* We assume any other error means it's not defined/needed */
239 		qmp = NULL;
240 	}
241 	power->qmp = qmp;
242 
243 	return 0;
244 }
245 
/* Inverse of ipa_power_retention_init().  Note power->qmp may be NULL
 * here (platform without retention support); qmp_put() is presumably
 * tolerant of that — confirm against the qcom_aoss API.
 */
static void ipa_power_retention_exit(struct ipa_power *power)
{
	qmp_put(power->qmp);
	power->qmp = NULL;
}
251 
/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	/* QMP message format; val is '1' to enable retention, '0' to
	 * disable it.  This string is part of the AOSS protocol; do
	 * not alter it.
	 */
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	/* Best-effort: a failure is logged but not propagated */
	ret = qmp_send(power->qmp, fmt, enable ? '1' : '0');
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
267 
268 int ipa_power_setup(struct ipa *ipa)
269 {
270 	int ret;
271 
272 	ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
273 
274 	ret = device_init_wakeup(&ipa->pdev->dev, true);
275 	if (ret)
276 		ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
277 
278 	return ret;
279 }
280 
/* Inverse of ipa_power_setup() */
void ipa_power_teardown(struct ipa *ipa)
{
	/* Any error disabling wakeup is deliberately ignored here */
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
286 
287 /* Initialize IPA power management */
288 struct ipa_power *
289 ipa_power_init(struct device *dev, const struct ipa_power_data *data)
290 {
291 	struct ipa_power *power;
292 	struct clk *clk;
293 	size_t size;
294 	int ret;
295 
296 	clk = clk_get(dev, "core");
297 	if (IS_ERR(clk)) {
298 		dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
299 
300 		return ERR_CAST(clk);
301 	}
302 
303 	ret = clk_set_rate(clk, data->core_clock_rate);
304 	if (ret) {
305 		dev_err(dev, "error %d setting core clock rate to %u\n",
306 			ret, data->core_clock_rate);
307 		goto err_clk_put;
308 	}
309 
310 	size = struct_size(power, interconnect, data->interconnect_count);
311 	power = kzalloc(size, GFP_KERNEL);
312 	if (!power) {
313 		ret = -ENOMEM;
314 		goto err_clk_put;
315 	}
316 	power->dev = dev;
317 	power->core = clk;
318 	power->interconnect_count = data->interconnect_count;
319 
320 	ret = ipa_interconnect_init(power, data->interconnect_data);
321 	if (ret)
322 		goto err_kfree;
323 
324 	ret = ipa_power_retention_init(power);
325 	if (ret)
326 		goto err_interconnect_exit;
327 
328 	pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
329 	pm_runtime_use_autosuspend(dev);
330 	pm_runtime_enable(dev);
331 
332 	return power;
333 
334 err_interconnect_exit:
335 	ipa_interconnect_exit(power);
336 err_kfree:
337 	kfree(power);
338 err_clk_put:
339 	clk_put(clk);
340 
341 	return ERR_PTR(ret);
342 }
343 
/* Inverse of ipa_power_init().  Teardown happens in the reverse order
 * of initialization; dev and clk are saved up front because kfree()
 * below frees the structure that holds them.
 */
void ipa_power_exit(struct ipa_power *power)
{
	struct device *dev = power->dev;
	struct clk *clk = power->core;

	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	ipa_power_retention_exit(power);
	ipa_interconnect_exit(power);
	kfree(power);
	clk_put(clk);
}
357 
/* Power management operations registered for the IPA driver; covers
 * both system sleep and runtime PM callbacks.
 */
const struct dev_pm_ops ipa_pm_ops = {
	.suspend		= ipa_suspend,
	.resume			= ipa_resume,
	.runtime_suspend	= ipa_runtime_suspend,
	.runtime_resume		= ipa_runtime_resume,
};
364