// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_startstop.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_fwif.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwrseq/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */

#define WATCHDOG_TIME_MS (500)

/**
 * pvr_device_lost() - Mark GPU device as lost
 * @pvr_dev: Target PowerVR device.
 *
 * This will cause the DRM device to be unplugged.
 */
void
pvr_device_lost(struct pvr_device *pvr_dev)
{
	if (!pvr_dev->lost) {
		pvr_dev->lost = true;
		drm_dev_unplug(from_pvr_device(pvr_dev));
	}
}

static int
pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	u32 slot_nr;
	u32 value;
	int err;

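	/*
	 * The FW writes a non-zero value to the power_sync object once it has
	 * processed the command; clear it here and poll for that
	 * acknowledgement below.
	 */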
	WRITE_ONCE(*fw_dev->power_sync, 0);

	err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
	if (err)
		return err;

	/* Wait for FW to acknowledge. */
	return readl_poll_timeout(fw_dev->power_sync, value, value != 0, 100,
				  POWER_SYNC_TIMEOUT_US);
}

static int
pvr_power_request_idle(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send FORCED_IDLE request to FW. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

static int
pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send POW_OFF request to firmware. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.forced = true;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

static int
pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
{
	if (!hard_reset) {
		int err;

		cancel_delayed_work_sync(&pvr_dev->watchdog.work);

		err = pvr_power_request_idle(pvr_dev);
		if (err)
			return err;

		err = pvr_power_request_pwr_off(pvr_dev);
		if (err)
			return err;
	}

	return pvr_fw_stop(pvr_dev);
}

static int
pvr_power_fw_enable(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_start(pvr_dev);
	if (err)
		return err;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		pvr_fw_stop(pvr_dev);
		return err;
	}

	queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
			   msecs_to_jiffies(WATCHDOG_TIME_MS));

	return 0;
}

bool
pvr_power_is_idle(struct pvr_device *pvr_dev)
{
	/*
	 * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
	 * started processing it yet. So also check the KCCB status.
	 */
	enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
	bool kccb_idle = pvr_kccb_is_idle(pvr_dev);

	return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
}
static bool
pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
{
	/* Check KCCB commands are progressing. */
	u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
	bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);

	if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
		pvr_dev->watchdog.kccb_stall_count++;

		/*
		 * If we have commands pending with no progress for 2 consecutive polls then
		 * consider KCCB command processing stalled.
		 */
		if (pvr_dev->watchdog.kccb_stall_count == 2) {
			pvr_dev->watchdog.kccb_stall_count = 0;
			return true;
		}
	} else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
		bool has_active_contexts;

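		/*
		 * The KCCB is idle here. If any context still has queued
		 * work, ping the FW to confirm it is alive.
		 */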
		mutex_lock(&pvr_dev->queues.lock);
		has_active_contexts = !list_empty(&pvr_dev->queues.active);
		mutex_unlock(&pvr_dev->queues.lock);

		if (has_active_contexts) {
			/* Send a HEALTH_CHECK command so we can verify FW is still alive. */
			struct rogue_fwif_kccb_cmd health_check_cmd;

			health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;

			pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
		}
	} else {
		pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
		pvr_dev->watchdog.kccb_stall_count = 0;
	}

	return false;
}

static void
pvr_watchdog_worker(struct work_struct *work)
{
	struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
						  watchdog.work.work);
	bool stalled;

	if (pvr_dev->lost)
		return;

	if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
		goto out_requeue;

	if (!pvr_dev->fw_dev.booted)
		goto out_pm_runtime_put;

	stalled = pvr_watchdog_kccb_stalled(pvr_dev);

	if (stalled) {
		drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset\n");

		pvr_power_reset(pvr_dev, true);
		/* Device may be lost at this point. */
	}

out_pm_runtime_put:
	pm_runtime_put(from_pvr_device(pvr_dev)->dev);

out_requeue:
	if (!pvr_dev->lost) {
		queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
				   msecs_to_jiffies(WATCHDOG_TIME_MS));
	}
}

/**
 * pvr_watchdog_init() - Initialise watchdog for device
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ENOMEM on out of memory.
 */
int
pvr_watchdog_init(struct pvr_device *pvr_dev)
{
	INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);

	return 0;
}

static int pvr_power_init_manual(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct reset_control *reset;

	reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
	if (IS_ERR(reset))
		return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
				     "failed to get gpu reset line\n");

	pvr_dev->reset = reset;

	return 0;
}

static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
{
	int err;

	err = clk_prepare_enable(pvr_dev->core_clk);
	if (err)
		return err;

	err = clk_prepare_enable(pvr_dev->sys_clk);
	if (err)
		goto err_core_clk_disable;

	err = clk_prepare_enable(pvr_dev->mem_clk);
	if (err)
		goto err_sys_clk_disable;

	/*
	 * According to the hardware manual, a delay of at least 32 clock
	 * cycles is required between de-asserting the clkgen reset and
	 * de-asserting the GPU reset. Assuming a worst-case scenario with
	 * a very low GPU clock frequency, a delay of 1 microsecond is
	 * sufficient to ensure this requirement is met across all
	 * feasible GPU clock speeds.
	 */
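	/* 32 cycles exceed 1 us only below 32 MHz, well under any realistic GPU core clock. */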
	udelay(1);

	err = reset_control_deassert(pvr_dev->reset);
	if (err)
		goto err_mem_clk_disable;

	return 0;

err_mem_clk_disable:
	clk_disable_unprepare(pvr_dev->mem_clk);

err_sys_clk_disable:
	clk_disable_unprepare(pvr_dev->sys_clk);

err_core_clk_disable:
	clk_disable_unprepare(pvr_dev->core_clk);

	return err;
}

static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
{
	int err;

	err = reset_control_assert(pvr_dev->reset);

	clk_disable_unprepare(pvr_dev->mem_clk);
	clk_disable_unprepare(pvr_dev->sys_clk);
	clk_disable_unprepare(pvr_dev->core_clk);

	return err;
}

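/*
 * Power sequence ops for platforms where the driver drives the GPU clocks and
 * reset line directly.
 */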
const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
	.init = pvr_power_init_manual,
	.power_on = pvr_power_on_sequence_manual,
	.power_off = pvr_power_off_sequence_manual,
};

static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;

	pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
	if (IS_ERR(pvr_dev->pwrseq)) {
		/*
		 * This platform requires a sequencer. If we can't get it, we
		 * must return the error (including -EPROBE_DEFER to wait for
		 * the provider to appear).
		 */
		return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
				     "Failed to get required power sequencer\n");
	}

	return 0;
}

static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_on(pvr_dev->pwrseq);
}

static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_off(pvr_dev->pwrseq);
}

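/*
 * Power sequence ops for platforms that delegate power sequencing to a
 * dedicated pwrseq provider.
 */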
const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
	.init = pvr_power_init_pwrseq,
	.power_on = pvr_power_on_sequence_pwrseq,
	.power_off = pvr_power_off_sequence_pwrseq,
};

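/**
 * pvr_power_device_suspend() - Power down the GPU
 * @dev: Platform device backing the PowerVR device.
 *
 * If the FW is booted, request it to idle and power off before running the
 * platform's power-off sequence.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EIO if the DRM device has been unplugged, or
 *  * Any error returned by pvr_power_fw_disable() or the power-off sequence.
 */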
int
pvr_power_device_suspend(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int err = 0;
	int idx;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_disable(pvr_dev, false);
		if (err)
			goto err_drm_dev_exit;
	}

	err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

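/**
 * pvr_power_device_resume() - Power up the GPU
 * @dev: Platform device backing the PowerVR device.
 *
 * Run the platform's power-on sequence, then restart the FW if it was booted
 * before the last power-down.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EIO if the DRM device has been unplugged, or
 *  * Any error returned by the power-on sequence or pvr_power_fw_enable().
 */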
int
pvr_power_device_resume(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
	if (err)
		goto err_drm_dev_exit;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_enable(pvr_dev);
		if (err)
			goto err_power_off;
	}

	drm_dev_exit(idx);

	return 0;

err_power_off:
	pvr_dev->device_data->pwr_ops->power_off(pvr_dev);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

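/**
 * pvr_power_device_idle() - Check whether the GPU is idle
 * @dev: Platform device backing the PowerVR device.
 *
 * Returns:
 *  * 0 if the FW reports itself idle and the KCCB has no pending commands, or
 *  * -%EBUSY otherwise.
 */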
int
pvr_power_device_idle(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}

static int
pvr_power_clear_error(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;
	int err;

	/* Ensure the device state is known and nothing is happening past this point */
	pm_runtime_disable(dev);

	/* Attempt to clear the runtime PM error by setting the current state again */
	if (pm_runtime_status_suspended(dev))
		err = pm_runtime_set_suspended(dev);
	else
		err = pm_runtime_set_active(dev);

	if (err) {
		drm_err(from_pvr_device(pvr_dev),
			"%s: Failed to clear runtime PM error (new error %d)\n",
			__func__, err);
	}

	pm_runtime_enable(dev);

	return err;
}

/**
 * pvr_power_get_clear() - Acquire a power reference, correcting any errors
 * @pvr_dev: Device pointer
 *
 * Attempt to acquire a power reference on the device. If runtime PM is in
 * the error state, attempt to clear the error and retry.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get() or the runtime PM API.
 */
static int
pvr_power_get_clear(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_power_get(pvr_dev);
	if (err == 0)
		return err;

	drm_warn(from_pvr_device(pvr_dev),
		 "%s: pvr_power_get returned error %d, attempting recovery\n",
		 __func__, err);

	err = pvr_power_clear_error(pvr_dev);
	if (err)
		return err;

	return pvr_power_get(pvr_dev);
}

/**
 * pvr_power_reset() - Reset the GPU
 * @pvr_dev: Device pointer
 * @hard_reset: %true for hard reset, %false for soft reset
 *
 * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
 * function will attempt a hard reset.
 *
 * If a hard reset fails then the GPU device is reported as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get(), pvr_power_fw_disable() or
 *    pvr_power_fw_enable().
 */
int
pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
{
	bool queues_disabled = false;
	int err;

	/*
	 * Take a power reference during the reset. This should prevent any interference with the
	 * power state during reset.
	 */
	WARN_ON(pvr_power_get_clear(pvr_dev));

	down_write(&pvr_dev->reset_sem);

	if (pvr_dev->lost) {
		err = -EIO;
		goto err_up_write;
	}

	/* Disable IRQs for the duration of the reset. */
	disable_irq(pvr_dev->irq);

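	/*
	 * A failed soft reset escalates to a hard reset; a failed hard reset
	 * marks the device as lost.
	 */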
	do {
		if (hard_reset) {
			pvr_queue_device_pre_reset(pvr_dev);
			queues_disabled = true;
		}

		err = pvr_power_fw_disable(pvr_dev, hard_reset);
		if (!err) {
			if (hard_reset) {
				pvr_dev->fw_dev.booted = false;
				WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));

				err = pvr_fw_hard_reset(pvr_dev);
				if (err)
					goto err_device_lost;

				err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
				pvr_dev->fw_dev.booted = true;
				if (err)
					goto err_device_lost;
			} else {
				/* Clear the FW faulted flags. */
				pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
					~(ROGUE_FWIF_HWR_FW_FAULT |
					  ROGUE_FWIF_HWR_RESTART_REQUESTED);
			}

			pvr_fw_irq_clear(pvr_dev);

			err = pvr_power_fw_enable(pvr_dev);
		}

		if (err && hard_reset)
			goto err_device_lost;

		if (err && !hard_reset) {
			drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset\n");
			hard_reset = true;
		}
	} while (err);

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

	enable_irq(pvr_dev->irq);

	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return 0;

err_device_lost:
	drm_err(from_pvr_device(pvr_dev), "GPU device lost\n");
	pvr_device_lost(pvr_dev);

	/* Leave IRQs disabled if the device is lost. */

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

err_up_write:
	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return err;
}

/**
 * pvr_watchdog_fini() - Shutdown watchdog for device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
	cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}

int pvr_power_domains_init(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;

	struct device_link **domain_links __free(kfree) = NULL;
	struct device **domain_devs __free(kfree) = NULL;
	int domain_count;
	int link_count;

	char dev_name[2] = "a";
	int err;
	int i;

	domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
						  "#power-domain-cells");
	if (domain_count < 0)
		return domain_count;

	if (domain_count <= 1)
		return 0;

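	/*
	 * One link from the GPU device to each domain, plus one link between
	 * each adjacent pair of domains.
	 */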
	link_count = domain_count + (domain_count - 1);

	domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
	if (!domain_devs)
		return -ENOMEM;

	domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
	if (!domain_links)
		return -ENOMEM;

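	/* Attach each domain by its single-letter name ("a", "b", ...). */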
	for (i = 0; i < domain_count; i++) {
		struct device *domain_dev;

		dev_name[0] = 'a' + i;
		domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
		if (IS_ERR_OR_NULL(domain_dev)) {
			err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
			goto err_detach;
		}

		domain_devs[i] = domain_dev;
	}

	for (i = 0; i < domain_count; i++) {
		struct device_link *link;

		link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	for (i = domain_count; i < link_count; i++) {
		struct device_link *link;

		link = device_link_add(domain_devs[i - domain_count + 1],
				       domain_devs[i - domain_count],
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	pvr_dev->power = (struct pvr_device_power){
		.domain_devs = no_free_ptr(domain_devs),
		.domain_links = no_free_ptr(domain_links),
		.domain_count = domain_count,
	};

	return 0;

err_unlink:
	while (--i >= 0)
		device_link_del(domain_links[i]);

	i = domain_count;

err_detach:
	while (--i >= 0)
		dev_pm_domain_detach(domain_devs[i], true);

	return err;
}

void pvr_power_domains_fini(struct pvr_device *pvr_dev)
{
	const int domain_count = pvr_dev->power.domain_count;

	int i = domain_count + (domain_count - 1);

	while (--i >= 0)
		device_link_del(pvr_dev->power.domain_links[i]);

	i = domain_count;

	while (--i >= 0)
		dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);

	kfree(pvr_dev->power.domain_links);
	kfree(pvr_dev->power.domain_devs);

	pvr_dev->power = (struct pvr_device_power){ 0 };
}