xref: /linux/drivers/gpu/drm/imagination/pvr_power.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_device.h"
5 #include "pvr_fw.h"
6 #include "pvr_fw_startstop.h"
7 #include "pvr_power.h"
8 #include "pvr_queue.h"
9 #include "pvr_rogue_fwif.h"
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_managed.h>
13 #include <drm/drm_print.h>
14 #include <linux/cleanup.h>
15 #include <linux/clk.h>
16 #include <linux/interrupt.h>
17 #include <linux/mutex.h>
18 #include <linux/of.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/pwrseq/consumer.h>
23 #include <linux/reset.h>
24 #include <linux/timer.h>
25 #include <linux/types.h>
26 #include <linux/workqueue.h>
27 
28 #define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */
29 
30 #define WATCHDOG_TIME_MS (500)
31 
32 /**
33  * pvr_device_lost() - Mark GPU device as lost
34  * @pvr_dev: Target PowerVR device.
35  *
36  * This will cause the DRM device to be unplugged.
37  */
38 void
39 pvr_device_lost(struct pvr_device *pvr_dev)
40 {
41 	if (!pvr_dev->lost) {
42 		pvr_dev->lost = true;
43 		drm_dev_unplug(from_pvr_device(pvr_dev));
44 	}
45 }
46 
/*
 * pvr_power_send_command() - Submit a power-state KCCB command and wait for
 * the firmware to acknowledge it through the power_sync object.
 *
 * Returns 0 on success, a negative error from pvr_kccb_send_cmd_powered(), or
 * -ETIMEDOUT if the FW does not acknowledge within POWER_SYNC_TIMEOUT_US.
 */
static int
pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	u32 slot_nr;
	u32 value;
	int err;

	/*
	 * Clear the sync object before submitting, so the poll below only
	 * observes a non-zero value once the FW has processed this command.
	 */
	WRITE_ONCE(*fw_dev->power_sync, 0);

	err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
	if (err)
		return err;

	/* Wait for FW to acknowledge (polling every 100us). */
	return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
				  POWER_SYNC_TIMEOUT_US);
}
65 
66 static int
67 pvr_power_request_idle(struct pvr_device *pvr_dev)
68 {
69 	struct rogue_fwif_kccb_cmd pow_cmd;
70 
71 	/* Send FORCED_IDLE request to FW. */
72 	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
73 	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
74 	pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;
75 
76 	return pvr_power_send_command(pvr_dev, &pow_cmd);
77 }
78 
79 static int
80 pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
81 {
82 	struct rogue_fwif_kccb_cmd pow_cmd;
83 
84 	/* Send POW_OFF request to firmware. */
85 	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
86 	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
87 	pow_cmd.cmd_data.pow_data.power_req_data.forced = true;
88 
89 	return pvr_power_send_command(pvr_dev, &pow_cmd);
90 }
91 
92 static int
93 pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
94 {
95 	if (!hard_reset) {
96 		int err;
97 
98 		cancel_delayed_work_sync(&pvr_dev->watchdog.work);
99 
100 		err = pvr_power_request_idle(pvr_dev);
101 		if (err)
102 			return err;
103 
104 		err = pvr_power_request_pwr_off(pvr_dev);
105 		if (err)
106 			return err;
107 	}
108 
109 	return pvr_fw_stop(pvr_dev);
110 }
111 
112 static int
113 pvr_power_fw_enable(struct pvr_device *pvr_dev)
114 {
115 	int err;
116 
117 	err = pvr_fw_start(pvr_dev);
118 	if (err)
119 		return err;
120 
121 	err = pvr_wait_for_fw_boot(pvr_dev);
122 	if (err) {
123 		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
124 		pvr_fw_stop(pvr_dev);
125 		return err;
126 	}
127 
128 	queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
129 			   msecs_to_jiffies(WATCHDOG_TIME_MS));
130 
131 	return 0;
132 }
133 
134 bool
135 pvr_power_is_idle(struct pvr_device *pvr_dev)
136 {
137 	/*
138 	 * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
139 	 * started processing it yet. So also check the KCCB status.
140 	 */
141 	enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
142 	bool kccb_idle = pvr_kccb_is_idle(pvr_dev);
143 
144 	return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
145 }
146 
147 static bool
148 pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
149 {
150 	/* Check KCCB commands are progressing. */
151 	u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
152 	bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);
153 
154 	if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
155 		pvr_dev->watchdog.kccb_stall_count++;
156 
157 		/*
158 		 * If we have commands pending with no progress for 2 consecutive polls then
159 		 * consider KCCB command processing stalled.
160 		 */
161 		if (pvr_dev->watchdog.kccb_stall_count == 2) {
162 			pvr_dev->watchdog.kccb_stall_count = 0;
163 			return true;
164 		}
165 	} else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
166 		bool has_active_contexts;
167 
168 		mutex_lock(&pvr_dev->queues.lock);
169 		has_active_contexts = list_empty(&pvr_dev->queues.active);
170 		mutex_unlock(&pvr_dev->queues.lock);
171 
172 		if (has_active_contexts) {
173 			/* Send a HEALTH_CHECK command so we can verify FW is still alive. */
174 			struct rogue_fwif_kccb_cmd health_check_cmd;
175 
176 			health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;
177 
178 			pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
179 		}
180 	} else {
181 		pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
182 		pvr_dev->watchdog.kccb_stall_count = 0;
183 	}
184 
185 	return false;
186 }
187 
/*
 * pvr_watchdog_worker() - Periodic FW liveness check.
 *
 * Runs every WATCHDOG_TIME_MS while the device is not lost. If the FW is
 * found stalled, a hard reset is attempted.
 */
static void
pvr_watchdog_worker(struct work_struct *work)
{
	struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
						  watchdog.work.work);
	bool stalled;

	/* Device has been unplugged; do not re-arm. */
	if (pvr_dev->lost)
		return;

	/*
	 * Only take a PM reference if the GPU is already powered. A suspended
	 * GPU cannot be stalled, so just skip this poll and re-arm.
	 */
	if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
		goto out_requeue;

	/* Nothing to check until the FW has booted. */
	if (!pvr_dev->fw_dev.booted)
		goto out_pm_runtime_put;

	stalled = pvr_watchdog_kccb_stalled(pvr_dev);

	if (stalled) {
		drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");

		pvr_power_reset(pvr_dev, true);
		/* Device may be lost at this point. */
	}

out_pm_runtime_put:
	pm_runtime_put(from_pvr_device(pvr_dev)->dev);

out_requeue:
	/* Re-check ->lost: the hard reset above may have unplugged the device. */
	if (!pvr_dev->lost) {
		queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
				   msecs_to_jiffies(WATCHDOG_TIME_MS));
	}
}
222 
/**
 * pvr_watchdog_init() - Initialise watchdog for device
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ENOMEM on out of memory.
 */
int
pvr_watchdog_init(struct pvr_device *pvr_dev)
{
	/* The work is only queued once the FW boots; see pvr_power_fw_enable(). */
	INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);

	return 0;
}
238 
239 static int pvr_power_init_manual(struct pvr_device *pvr_dev)
240 {
241 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
242 	struct reset_control *reset;
243 
244 	reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
245 	if (IS_ERR(reset))
246 		return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
247 				     "failed to get gpu reset line\n");
248 
249 	pvr_dev->reset = reset;
250 
251 	return 0;
252 }
253 
254 static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
255 {
256 	int err;
257 
258 	err = clk_prepare_enable(pvr_dev->core_clk);
259 	if (err)
260 		return err;
261 
262 	err = clk_prepare_enable(pvr_dev->sys_clk);
263 	if (err)
264 		goto err_core_clk_disable;
265 
266 	err = clk_prepare_enable(pvr_dev->mem_clk);
267 	if (err)
268 		goto err_sys_clk_disable;
269 
270 	/*
271 	 * According to the hardware manual, a delay of at least 32 clock
272 	 * cycles is required between de-asserting the clkgen reset and
273 	 * de-asserting the GPU reset. Assuming a worst-case scenario with
274 	 * a very high GPU clock frequency, a delay of 1 microsecond is
275 	 * sufficient to ensure this requirement is met across all
276 	 * feasible GPU clock speeds.
277 	 */
278 	udelay(1);
279 
280 	err = reset_control_deassert(pvr_dev->reset);
281 	if (err)
282 		goto err_mem_clk_disable;
283 
284 	return 0;
285 
286 err_mem_clk_disable:
287 	clk_disable_unprepare(pvr_dev->mem_clk);
288 
289 err_sys_clk_disable:
290 	clk_disable_unprepare(pvr_dev->sys_clk);
291 
292 err_core_clk_disable:
293 	clk_disable_unprepare(pvr_dev->core_clk);
294 
295 	return err;
296 }
297 
298 static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
299 {
300 	int err;
301 
302 	err = reset_control_assert(pvr_dev->reset);
303 
304 	clk_disable_unprepare(pvr_dev->mem_clk);
305 	clk_disable_unprepare(pvr_dev->sys_clk);
306 	clk_disable_unprepare(pvr_dev->core_clk);
307 
308 	return err;
309 }
310 
/* Power sequencing done directly via clocks and the reset line ("manual" mode). */
const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
	.init = pvr_power_init_manual,
	.power_on = pvr_power_on_sequence_manual,
	.power_off = pvr_power_off_sequence_manual,
};
316 
317 static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
318 {
319 	struct device *dev = from_pvr_device(pvr_dev)->dev;
320 
321 	pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
322 	if (IS_ERR(pvr_dev->pwrseq)) {
323 		/*
324 		 * This platform requires a sequencer. If we can't get it, we
325 		 * must return the error (including -EPROBE_DEFER to wait for
326 		 * the provider to appear)
327 		 */
328 		return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
329 				     "Failed to get required power sequencer\n");
330 	}
331 
332 	return 0;
333 }
334 
/* Power-on hook delegating to the acquired power sequencer. */
static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_on(pvr_dev->pwrseq);
}
339 
/* Power-off hook delegating to the acquired power sequencer. */
static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_off(pvr_dev->pwrseq);
}
344 
/* Power sequencing delegated to a platform power sequencer ("pwrseq" mode). */
const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
	.init = pvr_power_init_pwrseq,
	.power_on = pvr_power_on_sequence_pwrseq,
	.power_off = pvr_power_off_sequence_pwrseq,
};
350 
351 int
352 pvr_power_device_suspend(struct device *dev)
353 {
354 	struct platform_device *plat_dev = to_platform_device(dev);
355 	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
356 	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
357 	int err = 0;
358 	int idx;
359 
360 	if (!drm_dev_enter(drm_dev, &idx))
361 		return -EIO;
362 
363 	if (pvr_dev->fw_dev.booted) {
364 		err = pvr_power_fw_disable(pvr_dev, false);
365 		if (err)
366 			goto err_drm_dev_exit;
367 	}
368 
369 	err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
370 
371 err_drm_dev_exit:
372 	drm_dev_exit(idx);
373 
374 	return err;
375 }
376 
377 int
378 pvr_power_device_resume(struct device *dev)
379 {
380 	struct platform_device *plat_dev = to_platform_device(dev);
381 	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
382 	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
383 	int idx;
384 	int err;
385 
386 	if (!drm_dev_enter(drm_dev, &idx))
387 		return -EIO;
388 
389 	err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
390 	if (err)
391 		goto err_drm_dev_exit;
392 
393 	if (pvr_dev->fw_dev.booted) {
394 		err = pvr_power_fw_enable(pvr_dev);
395 		if (err)
396 			goto err_power_off;
397 	}
398 
399 	drm_dev_exit(idx);
400 
401 	return 0;
402 
403 err_power_off:
404 	pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
405 
406 err_drm_dev_exit:
407 	drm_dev_exit(idx);
408 
409 	return err;
410 }
411 
412 int
413 pvr_power_device_idle(struct device *dev)
414 {
415 	struct platform_device *plat_dev = to_platform_device(dev);
416 	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
417 	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
418 
419 	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
420 }
421 
/*
 * pvr_power_clear_error() - Attempt to clear a runtime PM error state.
 *
 * Runtime PM latches an error from a failed suspend/resume callback and
 * refuses further transitions until the status is rewritten. Returns 0 on
 * success or the error from the pm_runtime_set_*() call.
 */
static int
pvr_power_clear_error(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;
	int err;

	/* Ensure the device state is known and nothing is happening past this point */
	pm_runtime_disable(dev);

	/* Attempt to clear the runtime PM error by setting the current state again */
	if (pm_runtime_status_suspended(dev))
		err = pm_runtime_set_suspended(dev);
	else
		err = pm_runtime_set_active(dev);

	if (err) {
		drm_err(from_pvr_device(pvr_dev),
			"%s: Failed to clear runtime PM error (new error %d)\n",
			__func__, err);
	}

	/* Re-enable runtime PM regardless of the outcome above. */
	pm_runtime_enable(dev);

	return err;
}
447 
/**
 * pvr_power_get_clear() - Acquire a power reference, correcting any errors
 * @pvr_dev: Device pointer
 *
 * Attempt to acquire a power reference on the device. If the runtime PM
 * is in error state, attempt to clear the error and retry.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get() or the runtime PM API.
 */
static int
pvr_power_get_clear(struct pvr_device *pvr_dev)
{
	int err = pvr_power_get(pvr_dev);

	if (!err)
		return 0;

	drm_warn(from_pvr_device(pvr_dev),
		 "%s: pvr_power_get returned error %d, attempting recovery\n",
		 __func__, err);

	err = pvr_power_clear_error(pvr_dev);
	if (err)
		return err;

	/* Error state cleared; retry once. */
	return pvr_power_get(pvr_dev);
}
478 
/**
 * pvr_power_reset() - Reset the GPU
 * @pvr_dev: Device pointer
 * @hard_reset: %true for hard reset, %false for soft reset
 *
 * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
 * function will attempt a hard reset.
 *
 * If a hard reset fails then the GPU device is reported as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get, pvr_power_fw_disable or pvr_power_fw_enable().
 */
int
pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
{
	bool queues_disabled = false;
	int err;

	/*
	 * Take a power reference during the reset. This should prevent any interference with the
	 * power state during reset.
	 */
	WARN_ON(pvr_power_get_clear(pvr_dev));

	/* Serialise against concurrent resets and reset-aware readers. */
	down_write(&pvr_dev->reset_sem);

	if (pvr_dev->lost) {
		err = -EIO;
		goto err_up_write;
	}

	/* Disable IRQs for the duration of the reset. */
	disable_irq(pvr_dev->irq);

	/*
	 * Loop so that a failed soft reset escalates to a hard reset on the
	 * next iteration; a failed hard reset bails out via err_device_lost.
	 */
	do {
		if (hard_reset) {
			pvr_queue_device_pre_reset(pvr_dev);
			queues_disabled = true;
		}

		err = pvr_power_fw_disable(pvr_dev, hard_reset);
		if (!err) {
			if (hard_reset) {
				/*
				 * Full power cycle: suspend, reset the FW
				 * processor, then resume with ->booted toggled
				 * so the suspend/resume paths skip FW handling.
				 */
				pvr_dev->fw_dev.booted = false;
				WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));

				err = pvr_fw_hard_reset(pvr_dev);
				if (err)
					goto err_device_lost;

				err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
				pvr_dev->fw_dev.booted = true;
				if (err)
					goto err_device_lost;
			} else {
				/* Clear the FW faulted flags. */
				pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
					~(ROGUE_FWIF_HWR_FW_FAULT |
					  ROGUE_FWIF_HWR_RESTART_REQUESTED);
			}

			pvr_fw_irq_clear(pvr_dev);

			err = pvr_power_fw_enable(pvr_dev);
		}

		if (err && hard_reset)
			goto err_device_lost;

		if (err && !hard_reset) {
			drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
			hard_reset = true;
		}
	} while (err);

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

	enable_irq(pvr_dev->irq);

	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return 0;

err_device_lost:
	drm_err(from_pvr_device(pvr_dev), "GPU device lost");
	pvr_device_lost(pvr_dev);

	/* Leave IRQs disabled if the device is lost. */

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

err_up_write:
	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return err;
}
583 
/**
 * pvr_watchdog_fini() - Shutdown watchdog for device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
	/* Waits for an in-flight worker to finish before returning. */
	cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
593 
/*
 * pvr_power_domains_init() - Attach and chain multiple PM power domains.
 *
 * When the device node lists more than one power domain, the PM core does not
 * attach them automatically, so attach each by name ("a", "b", ...) and
 * create runtime-PM device links: one from the GPU to each domain, plus one
 * between each pair of consecutive domains so they power up in order.
 *
 * Returns 0 on success (including the 0/1-domain case where the core handles
 * attachment itself) or a negative error code.
 */
int pvr_power_domains_init(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;

	/* __free(kfree): freed automatically unless ownership is transferred below. */
	struct device_link **domain_links __free(kfree) = NULL;
	struct device **domain_devs __free(kfree) = NULL;
	int domain_count;
	int link_count;

	char dev_name[2] = "a";
	int err;
	int i;

	domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
						  "#power-domain-cells");
	if (domain_count < 0)
		return domain_count;

	/* With zero or one domain the PM core attaches it for us. */
	if (domain_count <= 1)
		return 0;

	/* device->domain links plus chain links between consecutive domains. */
	link_count = domain_count + (domain_count - 1);

	domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
	if (!domain_devs)
		return -ENOMEM;

	domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
	if (!domain_links)
		return -ENOMEM;

	/* Attach each domain by single-letter name: "a", "b", "c", ... */
	for (i = 0; i < domain_count; i++) {
		struct device *domain_dev;

		dev_name[0] = 'a' + i;
		domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
		if (IS_ERR_OR_NULL(domain_dev)) {
			err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
			goto err_detach;
		}

		domain_devs[i] = domain_dev;
	}

	/* Link the GPU device as a consumer of every domain. */
	for (i = 0; i < domain_count; i++) {
		struct device_link *link;

		link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	/* Chain consecutive domains: domain N+1 consumes domain N. */
	for (i = domain_count; i < link_count; i++) {
		struct device_link *link;

		link = device_link_add(domain_devs[i - domain_count + 1],
				       domain_devs[i - domain_count],
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	/* no_free_ptr() transfers ownership out of the __free() cleanup scope. */
	pvr_dev->power = (struct pvr_device_power){
		.domain_devs = no_free_ptr(domain_devs),
		.domain_links = no_free_ptr(domain_links),
		.domain_count = domain_count,
	};

	return 0;

err_unlink:
	/* Delete the links created so far, then fall through to detach all domains. */
	while (--i >= 0)
		device_link_del(domain_links[i]);

	i = domain_count;

err_detach:
	while (--i >= 0)
		dev_pm_domain_detach(domain_devs[i], true);

	return err;
}
684 
/*
 * pvr_power_domains_fini() - Undo pvr_power_domains_init().
 *
 * Deletes all device links (chain links and device->domain links), detaches
 * every attached domain, then frees the tracking arrays.
 */
void pvr_power_domains_fini(struct pvr_device *pvr_dev)
{
	const int domain_count = pvr_dev->power.domain_count;

	/* Same total as link_count in pvr_power_domains_init(). */
	int i = domain_count + (domain_count - 1);

	while (--i >= 0)
		device_link_del(pvr_dev->power.domain_links[i]);

	i = domain_count;

	while (--i >= 0)
		dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);

	kfree(pvr_dev->power.domain_links);
	kfree(pvr_dev->power.domain_devs);

	/* Reset so a subsequent init/fini cycle starts from a clean state. */
	pvr_dev->power = (struct pvr_device_power){ 0 };
}
704