// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_startstop.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_fwif.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */

#define WATCHDOG_TIME_MS (500)

/**
 * pvr_device_lost() - Mark GPU device as lost
 * @pvr_dev: Target PowerVR device.
 *
 * This will cause the DRM device to be unplugged.
 */
void
pvr_device_lost(struct pvr_device *pvr_dev)
{
	if (!pvr_dev->lost) {
		pvr_dev->lost = true;
		drm_dev_unplug(from_pvr_device(pvr_dev));
	}
}

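/**
 * pvr_power_send_command() - Send a power command to the FW and wait for acknowledgement
 * @pvr_dev: Target PowerVR device.
 * @pow_cmd: Power command to send.
 *
 * The power sync object is cleared before the command is submitted, then polled until the
 * FW signals it or %POWER_SYNC_TIMEOUT_US elapses.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error returned by pvr_kccb_send_cmd_powered(), or
 *  * -%ETIMEDOUT if the FW does not acknowledge the command in time.
 */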
static int
pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	u32 slot_nr;
	u32 value;
	int err;

	WRITE_ONCE(*fw_dev->power_sync, 0);

	err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
	if (err)
		return err;

	/* Wait for FW to acknowledge. */
	return readl_poll_timeout(fw_dev->power_sync, value, value != 0, 100,
				  POWER_SYNC_TIMEOUT_US);
}

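/**
 * pvr_power_request_idle() - Ask the FW to move the GPU into the forced idle state
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_power_send_command().
 */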
static int
pvr_power_request_idle(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send FORCED_IDLE request to FW. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

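/**
 * pvr_power_request_pwr_off() - Ask the FW to power the GPU off
 * @pvr_dev: Target PowerVR device.
 *
 * The FW is expected to be in the forced idle state already; see pvr_power_request_idle().
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_power_send_command().
 */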
static int
pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send POW_OFF request to FW. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.forced = true;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

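/**
 * pvr_power_fw_disable() - Stop the FW processor
 * @pvr_dev: Target PowerVR device.
 * @hard_reset: %true if this is part of a hard reset.
 *
 * Unless a hard reset is in progress, the watchdog is cancelled and the FW is asked to
 * idle and power off before being stopped. On a hard reset the FW may be unresponsive,
 * so it is stopped immediately.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_power_request_idle(), pvr_power_request_pwr_off() or
 *    pvr_fw_stop().
 */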
static int
pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
{
	if (!hard_reset) {
		int err;

		cancel_delayed_work_sync(&pvr_dev->watchdog.work);

		err = pvr_power_request_idle(pvr_dev);
		if (err)
			return err;

		err = pvr_power_request_pwr_off(pvr_dev);
		if (err)
			return err;
	}

	return pvr_fw_stop(pvr_dev);
}

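/**
 * pvr_power_fw_enable() - Start the FW processor and wait for it to boot
 * @pvr_dev: Target PowerVR device.
 *
 * On success the watchdog work is (re)queued. If the FW fails to boot, it is stopped
 * again before returning.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_start() or pvr_wait_for_fw_boot().
 */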
static int
pvr_power_fw_enable(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_start(pvr_dev);
	if (err)
		return err;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		pvr_fw_stop(pvr_dev);
		return err;
	}

	queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
			   msecs_to_jiffies(WATCHDOG_TIME_MS));

	return 0;
}

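/**
 * pvr_power_is_idle() - Check whether the GPU is idle
 * @pvr_dev: Target PowerVR device.
 *
 * Both the FW-reported power state and the KCCB are checked, as the former can be stale
 * while a submitted command is still waiting to be processed.
 *
 * Returns:
 *  * %true if the FW is idle and the KCCB has no pending commands, or
 *  * %false otherwise.
 */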
bool
pvr_power_is_idle(struct pvr_device *pvr_dev)
{
	/*
	 * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
	 * started processing it yet. So also check the KCCB status.
	 */
	enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
	bool kccb_idle = pvr_kccb_is_idle(pvr_dev);

	return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
}

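/**
 * pvr_watchdog_kccb_stalled() - Check whether KCCB command processing has stalled
 * @pvr_dev: Target PowerVR device.
 *
 * Compares the number of executed KCCB commands against the previous watchdog poll. If
 * commands are pending but no progress has been made for two consecutive polls, the KCCB
 * is considered stalled. If the KCCB is idle with no progress but active contexts exist,
 * a HEALTH_CHECK command is sent so the next poll can verify the FW is still alive.
 *
 * Returns:
 *  * %true if KCCB command processing has stalled, or
 *  * %false otherwise.
 */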
static bool
pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
{
	/* Check KCCB commands are progressing. */
	u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
	bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);

	if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
		pvr_dev->watchdog.kccb_stall_count++;

		/*
		 * If we have commands pending with no progress for 2 consecutive polls then
		 * consider KCCB command processing stalled.
		 */
		if (pvr_dev->watchdog.kccb_stall_count == 2) {
			pvr_dev->watchdog.kccb_stall_count = 0;
			return true;
		}
	} else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
		bool has_active_contexts;

		mutex_lock(&pvr_dev->queues.lock);
		has_active_contexts = !list_empty(&pvr_dev->queues.active);
		mutex_unlock(&pvr_dev->queues.lock);

		if (has_active_contexts) {
			/* Send a HEALTH_CHECK command so we can verify FW is still alive. */
			struct rogue_fwif_kccb_cmd health_check_cmd;

			health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;

			pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
		}
	} else {
		pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
		pvr_dev->watchdog.kccb_stall_count = 0;
	}

	return false;
}

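/**
 * pvr_watchdog_worker() - Periodic watchdog work
 * @work: Work item.
 *
 * Runs every %WATCHDOG_TIME_MS. If the GPU is powered and the FW has booted, checks for a
 * KCCB stall and attempts a hard reset if one is detected. Requeues itself unless the
 * device has been lost.
 */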
static void
pvr_watchdog_worker(struct work_struct *work)
{
	struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
						  watchdog.work.work);
	bool stalled;

	if (pvr_dev->lost)
		return;

	if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
		goto out_requeue;

	if (!pvr_dev->fw_dev.booted)
		goto out_pm_runtime_put;

	stalled = pvr_watchdog_kccb_stalled(pvr_dev);

	if (stalled) {
		drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset\n");

		pvr_power_reset(pvr_dev, true);
		/* Device may be lost at this point. */
	}

out_pm_runtime_put:
	pm_runtime_put(from_pvr_device(pvr_dev)->dev);

out_requeue:
	if (!pvr_dev->lost) {
		queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
				   msecs_to_jiffies(WATCHDOG_TIME_MS));
	}
}


/**
 * pvr_watchdog_init() - Initialise watchdog for device
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ENOMEM on out of memory.
 */
int
pvr_watchdog_init(struct pvr_device *pvr_dev)
{
	INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);

	return 0;
}

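/**
 * pvr_power_device_suspend() - Suspend the GPU
 * @dev: Platform device for the PowerVR device.
 *
 * Stops the FW processor, if booted, then disables the GPU clocks.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EIO if the DRM device has been unplugged, or
 *  * Any error returned by pvr_power_fw_disable().
 */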
int
pvr_power_device_suspend(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int err = 0;
	int idx;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_disable(pvr_dev, false);
		if (err)
			goto err_drm_dev_exit;
	}

	clk_disable_unprepare(pvr_dev->mem_clk);
	clk_disable_unprepare(pvr_dev->sys_clk);
	clk_disable_unprepare(pvr_dev->core_clk);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

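/**
 * pvr_power_device_resume() - Resume the GPU
 * @dev: Platform device for the PowerVR device.
 *
 * Enables the GPU clocks, then restarts the FW processor if it was booted before suspend.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EIO if the DRM device has been unplugged, or
 *  * Any error returned by clk_prepare_enable() or pvr_power_fw_enable().
 */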
int
pvr_power_device_resume(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = clk_prepare_enable(pvr_dev->core_clk);
	if (err)
		goto err_drm_dev_exit;

	err = clk_prepare_enable(pvr_dev->sys_clk);
	if (err)
		goto err_core_clk_disable;

	err = clk_prepare_enable(pvr_dev->mem_clk);
	if (err)
		goto err_sys_clk_disable;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_enable(pvr_dev);
		if (err)
			goto err_mem_clk_disable;
	}

	drm_dev_exit(idx);

	return 0;

err_mem_clk_disable:
	clk_disable_unprepare(pvr_dev->mem_clk);

err_sys_clk_disable:
	clk_disable_unprepare(pvr_dev->sys_clk);

err_core_clk_disable:
	clk_disable_unprepare(pvr_dev->core_clk);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

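/**
 * pvr_power_device_idle() - Check whether the GPU can be powered down
 * @dev: Platform device for the PowerVR device.
 *
 * Returns:
 *  * 0 if the GPU is idle, or
 *  * -%EBUSY otherwise.
 */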
int
pvr_power_device_idle(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}

/**
 * pvr_power_reset() - Reset the GPU
 * @pvr_dev: Device pointer
 * @hard_reset: %true for hard reset, %false for soft reset
 *
 * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
 * function will attempt a hard reset.
 *
 * If a hard reset fails then the GPU device is reported as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get(), pvr_power_fw_disable() or
 *    pvr_power_fw_enable().
 */
int
pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
{
	bool queues_disabled = false;
	int err;

	/*
	 * Take a power reference during the reset. This should prevent any interference with the
	 * power state during reset.
	 */
	WARN_ON(pvr_power_get(pvr_dev));

	down_write(&pvr_dev->reset_sem);

	if (pvr_dev->lost) {
		err = -EIO;
		goto err_up_write;
	}

	/* Disable IRQs for the duration of the reset. */
	disable_irq(pvr_dev->irq);

	do {
		if (hard_reset) {
			pvr_queue_device_pre_reset(pvr_dev);
			queues_disabled = true;
		}

		err = pvr_power_fw_disable(pvr_dev, hard_reset);
		if (!err) {
			if (hard_reset) {
				pvr_dev->fw_dev.booted = false;
				WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));

				err = pvr_fw_hard_reset(pvr_dev);
				if (err)
					goto err_device_lost;

				err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
				pvr_dev->fw_dev.booted = true;
				if (err)
					goto err_device_lost;
			} else {
				/* Clear the FW faulted flags. */
				pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
					~(ROGUE_FWIF_HWR_FW_FAULT |
					  ROGUE_FWIF_HWR_RESTART_REQUESTED);
			}

			pvr_fw_irq_clear(pvr_dev);

			err = pvr_power_fw_enable(pvr_dev);
		}

		if (err && hard_reset)
			goto err_device_lost;

		if (err && !hard_reset) {
			drm_err(from_pvr_device(pvr_dev), "Soft reset failed, trying hard reset\n");
			hard_reset = true;
		}
	} while (err);

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

	enable_irq(pvr_dev->irq);

	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return 0;

err_device_lost:
	drm_err(from_pvr_device(pvr_dev), "GPU device lost\n");
	pvr_device_lost(pvr_dev);

	/* Leave IRQs disabled if the device is lost. */

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

err_up_write:
	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return err;
}

/**
 * pvr_watchdog_fini() - Shutdown watchdog for device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
	cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
434