// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_startstop.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_fwif.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwrseq/consumer.h>
#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */

#define WATCHDOG_TIME_MS (500)

/**
 * pvr_device_lost() - Mark GPU device as lost
 * @pvr_dev: Target PowerVR device.
 *
 * This will cause the DRM device to be unplugged.
 */
void
pvr_device_lost(struct pvr_device *pvr_dev)
{
	if (!pvr_dev->lost) {
		pvr_dev->lost = true;
		drm_dev_unplug(from_pvr_device(pvr_dev));
	}
}

static int
pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	u32 slot_nr;
	u32 value;
	int err;

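	/*
	 * power_sync is a flag shared with the FW: clear it before sending the
	 * command so the poll below can observe the FW writing it back
	 * non-zero once the power request has been processed.
	 */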
	WRITE_ONCE(*fw_dev->power_sync, 0);

	err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
	if (err)
		return err;

	/* Wait for FW to acknowledge. */
	return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
				  POWER_SYNC_TIMEOUT_US);
}

static int
pvr_power_request_idle(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send FORCED_IDLE request to FW. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

static int
pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
{
	struct rogue_fwif_kccb_cmd pow_cmd;

	/* Send POW_OFF request to firmware. */
	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
	pow_cmd.cmd_data.pow_data.power_req_data.forced = true;

	return pvr_power_send_command(pvr_dev, &pow_cmd);
}

static int
pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset, bool rpm_suspend)
{
	int err;

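	/*
	 * On a hard reset the FW may be unresponsive, so skip the cooperative
	 * idle/power-off handshake and stop the FW processor directly.
	 */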
	if (!hard_reset) {
		cancel_delayed_work_sync(&pvr_dev->watchdog.work);

		err = pvr_power_request_idle(pvr_dev);
		if (err)
			return err;

		err = pvr_power_request_pwr_off(pvr_dev);
		if (err)
			return err;
	}

	if (rpm_suspend) {
		/* This also waits for late processing of GPU or firmware IRQs on other cores. */
		disable_irq(pvr_dev->irq);
	}

	err = pvr_fw_stop(pvr_dev);
	if (err && rpm_suspend)
		enable_irq(pvr_dev->irq);

	return err;
}

static int
pvr_power_fw_enable(struct pvr_device *pvr_dev, bool rpm_resume)
{
	int err;

	if (rpm_resume)
		enable_irq(pvr_dev->irq);

	err = pvr_fw_start(pvr_dev);
	if (err)
		goto out;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		pvr_fw_stop(pvr_dev);
		goto out;
	}

	queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
			   msecs_to_jiffies(WATCHDOG_TIME_MS));

	return 0;

out:
	if (rpm_resume)
		disable_irq(pvr_dev->irq);

	return err;
}

bool
pvr_power_is_idle(struct pvr_device *pvr_dev)
{
	/*
	 * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
	 * started processing it yet. So also check the KCCB status.
	 */
	enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
	bool kccb_idle = pvr_kccb_is_idle(pvr_dev);

	return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
}

static bool
pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
{
	/* Check KCCB commands are progressing. */
	u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
	bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);

	if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
		pvr_dev->watchdog.kccb_stall_count++;

		/*
		 * If we have commands pending with no progress for 2 consecutive polls then
		 * consider KCCB command processing stalled.
		 */
		if (pvr_dev->watchdog.kccb_stall_count == 2) {
			pvr_dev->watchdog.kccb_stall_count = 0;
			return true;
		}
	} else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
		bool has_active_contexts;

		mutex_lock(&pvr_dev->queues.lock);
		has_active_contexts = !list_empty(&pvr_dev->queues.active);
		mutex_unlock(&pvr_dev->queues.lock);

		if (has_active_contexts) {
			/* Send a HEALTH_CHECK command so we can verify FW is still alive. */
			struct rogue_fwif_kccb_cmd health_check_cmd;

			health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;

			pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
		}
	} else {
		pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
		pvr_dev->watchdog.kccb_stall_count = 0;
	}

	return false;
}

static void
pvr_watchdog_worker(struct work_struct *work)
{
	struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
						  watchdog.work.work);
	bool stalled;

	if (pvr_dev->lost)
		return;

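	/*
	 * Only check on the GPU if it is already powered up; don't take a new
	 * power reference just to run the watchdog.
	 */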
	if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
		goto out_requeue;

	if (!pvr_dev->fw_dev.booted)
		goto out_pm_runtime_put;

	stalled = pvr_watchdog_kccb_stalled(pvr_dev);

	if (stalled) {
		drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");

		pvr_power_reset(pvr_dev, true);
		/* Device may be lost at this point. */
	}

out_pm_runtime_put:
	pm_runtime_put(from_pvr_device(pvr_dev)->dev);

out_requeue:
	if (!pvr_dev->lost) {
		queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
				   msecs_to_jiffies(WATCHDOG_TIME_MS));
	}
}

/**
 * pvr_watchdog_init() - Initialise watchdog for device
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ENOMEM on out of memory.
 */
int
pvr_watchdog_init(struct pvr_device *pvr_dev)
{
	INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);

	return 0;
}

static int pvr_power_init_manual(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct reset_control *reset;

	reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
	if (IS_ERR(reset))
		return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
				     "failed to get gpu reset line\n");

	pvr_dev->reset = reset;

	return 0;
}

static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
{
	int err;

	err = clk_prepare_enable(pvr_dev->core_clk);
	if (err)
		return err;

	err = clk_prepare_enable(pvr_dev->sys_clk);
	if (err)
		goto err_core_clk_disable;

	err = clk_prepare_enable(pvr_dev->mem_clk);
	if (err)
		goto err_sys_clk_disable;

	/*
	 * According to the hardware manual, a delay of at least 32 clock
	 * cycles is required between de-asserting the clkgen reset and
	 * de-asserting the GPU reset. Assuming a worst-case scenario with
	 * a very high GPU clock frequency, a delay of 1 microsecond is
	 * sufficient to ensure this requirement is met across all
	 * feasible GPU clock speeds.
	 */
	udelay(1);

	err = reset_control_deassert(pvr_dev->reset);
	if (err)
		goto err_mem_clk_disable;

	return 0;

err_mem_clk_disable:
	clk_disable_unprepare(pvr_dev->mem_clk);

err_sys_clk_disable:
	clk_disable_unprepare(pvr_dev->sys_clk);

err_core_clk_disable:
	clk_disable_unprepare(pvr_dev->core_clk);

	return err;
}

static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
{
	int err;

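	/*
	 * Assert the GPU reset before removing the clocks so the core is
	 * quiescent by the time its clocks stop; any reset error is reported
	 * only after the clocks have been disabled.
	 */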
	err = reset_control_assert(pvr_dev->reset);

	clk_disable_unprepare(pvr_dev->mem_clk);
	clk_disable_unprepare(pvr_dev->sys_clk);
	clk_disable_unprepare(pvr_dev->core_clk);

	return err;
}

const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
	.init = pvr_power_init_manual,
	.power_on = pvr_power_on_sequence_manual,
	.power_off = pvr_power_off_sequence_manual,
};

static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;

	pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
	if (IS_ERR(pvr_dev->pwrseq)) {
		/*
		 * This platform requires a sequencer. If we can't get it, we
		 * must return the error (including -EPROBE_DEFER, to wait for
		 * the provider to appear).
		 */
		return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
				     "Failed to get required power sequencer\n");
	}

	return 0;
}

static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_on(pvr_dev->pwrseq);
}

static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
{
	return pwrseq_power_off(pvr_dev->pwrseq);
}

const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
	.init = pvr_power_init_pwrseq,
	.power_on = pvr_power_on_sequence_pwrseq,
	.power_off = pvr_power_off_sequence_pwrseq,
};

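/**
 * pvr_power_device_suspend() - Power off the GPU
 * @dev: Platform device backing the PowerVR device.
 *
 * Called by the PM core. Stops the firmware processor, if booted, then runs
 * the platform power-off sequence.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_power_fw_disable() or the power_off()
 *    sequence op.
 */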
int
pvr_power_device_suspend(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int err = 0;
	int idx;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_disable(pvr_dev, false, true);
		if (err)
			goto err_drm_dev_exit;
	}

	err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

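/**
 * pvr_power_device_resume() - Power on the GPU
 * @dev: Platform device backing the PowerVR device.
 *
 * Called by the PM core. Runs the platform power-on sequence, then restarts
 * the firmware processor if it was previously booted.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by the power_on() sequence op or pvr_power_fw_enable().
 */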
int
pvr_power_device_resume(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
	if (err)
		goto err_drm_dev_exit;

	if (pvr_dev->fw_dev.booted) {
		err = pvr_power_fw_enable(pvr_dev, true);
		if (err)
			goto err_power_off;
	}

	drm_dev_exit(idx);

	return 0;

err_power_off:
	pvr_dev->device_data->pwr_ops->power_off(pvr_dev);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

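/**
 * pvr_power_device_idle() - Check whether the GPU can runtime suspend
 * @dev: Platform device backing the PowerVR device.
 *
 * Returns:
 *  * 0 if the GPU is idle, or
 *  * -%EBUSY if the FW or the KCCB still has work pending.
 */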
int
pvr_power_device_idle(struct device *dev)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
}

static int
pvr_power_clear_error(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;
	int err;

	/* Ensure the device state is known and nothing is happening past this point */
	pm_runtime_disable(dev);

	/* Attempt to clear the runtime PM error by setting the current state again */
	if (pm_runtime_status_suspended(dev))
		err = pm_runtime_set_suspended(dev);
	else
		err = pm_runtime_set_active(dev);

	if (err) {
		drm_err(from_pvr_device(pvr_dev),
			"%s: Failed to clear runtime PM error (new error %d)\n",
			__func__, err);
	}

	pm_runtime_enable(dev);

	return err;
}

/**
 * pvr_power_get_clear() - Acquire a power reference, correcting any errors
 * @pvr_dev: Device pointer
 *
 * Attempt to acquire a power reference on the device. If the runtime PM
 * is in an error state, attempt to clear the error and retry.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get() or the runtime PM API.
 */
static int
pvr_power_get_clear(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_power_get(pvr_dev);
	if (err == 0)
		return err;

	drm_warn(from_pvr_device(pvr_dev),
		 "%s: pvr_power_get returned error %d, attempting recovery\n",
		 __func__, err);

	err = pvr_power_clear_error(pvr_dev);
	if (err)
		return err;

	return pvr_power_get(pvr_dev);
}

/**
 * pvr_power_reset() - Reset the GPU
 * @pvr_dev: Device pointer
 * @hard_reset: %true for hard reset, %false for soft reset
 *
 * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
 * function will attempt a hard reset.
 *
 * If a hard reset fails then the GPU device is reported as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_power_get(), pvr_power_fw_disable() or
 *    pvr_power_fw_enable().
 */
int
pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
{
	bool queues_disabled = false;
	int err;

	/*
	 * Take a power reference during the reset. This should prevent any interference with the
	 * power state during reset.
	 */
	WARN_ON(pvr_power_get_clear(pvr_dev));

	down_write(&pvr_dev->reset_sem);

	if (pvr_dev->lost) {
		err = -EIO;
		goto err_up_write;
	}

	/* Disable IRQs for the duration of the reset. */
	if (hard_reset) {
		disable_irq(pvr_dev->irq);
	} else {
		/*
		 * Soft reset is triggered as a response to a FW command to the Host and is
		 * processed from the threaded IRQ handler. This code cannot (and does not
		 * need to) wait for any IRQ processing to complete.
		 */
		disable_irq_nosync(pvr_dev->irq);
	}

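	/*
	 * Retry loop: if a soft reset fails to bring the FW back, escalate to
	 * a hard reset; a failed hard reset marks the device as lost.
	 */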
	do {
		if (hard_reset) {
			pvr_queue_device_pre_reset(pvr_dev);
			queues_disabled = true;
		}

		err = pvr_power_fw_disable(pvr_dev, hard_reset, false);
		if (!err) {
			if (hard_reset) {
				pvr_dev->fw_dev.booted = false;
				WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev));

				err = pvr_fw_hard_reset(pvr_dev);
				if (err)
					goto err_device_lost;

				err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev);
				pvr_dev->fw_dev.booted = true;
				if (err)
					goto err_device_lost;
			} else {
				/* Clear the FW faulted flags. */
				pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
					~(ROGUE_FWIF_HWR_FW_FAULT |
					  ROGUE_FWIF_HWR_RESTART_REQUESTED);
			}

			pvr_fw_irq_clear(pvr_dev);

			err = pvr_power_fw_enable(pvr_dev, false);
		}

		if (err && hard_reset)
			goto err_device_lost;

		if (err && !hard_reset) {
			drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
			hard_reset = true;
		}
	} while (err);

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

	enable_irq(pvr_dev->irq);

	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return 0;

err_device_lost:
	drm_err(from_pvr_device(pvr_dev), "GPU device lost");
	pvr_device_lost(pvr_dev);

	/* Leave IRQs disabled if the device is lost. */

	if (queues_disabled)
		pvr_queue_device_post_reset(pvr_dev);

err_up_write:
	up_write(&pvr_dev->reset_sem);

	pvr_power_put(pvr_dev);

	return err;
}

/**
 * pvr_watchdog_fini() - Shutdown watchdog for device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
	cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}

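/**
 * pvr_power_domains_init() - Attach and link the GPU's power domains
 * @pvr_dev: Target PowerVR device.
 *
 * A device with a single power domain is handled by the PM core; this is
 * only needed when more than one domain is listed in the devicetree, in
 * which case each domain is attached by name and tied to the GPU (and to
 * its neighbour) with device links.
 *
 * Returns:
 *  * 0 on success, or
 *  * An error code if attaching a domain or creating a device link fails.
 */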
int pvr_power_domains_init(struct pvr_device *pvr_dev)
{
	struct device *dev = from_pvr_device(pvr_dev)->dev;

	struct device_link **domain_links __free(kfree) = NULL;
	struct device **domain_devs __free(kfree) = NULL;
	int domain_count;
	int link_count;

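	/*
	 * Domains are attached by name; the names follow the single-letter
	 * "a", "b", ... scheme used by the devicetree binding.
	 */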
	char dev_name[2] = "a";
	int err;
	int i;

	domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
						  "#power-domain-cells");
	if (domain_count < 0)
		return domain_count;

	if (domain_count <= 1)
		return 0;

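	/*
	 * One link from the GPU device to each domain, plus one link between
	 * each pair of adjacent domains (created by the second loop below) so
	 * that the domains always power up together and in a fixed order.
	 */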
	link_count = domain_count + (domain_count - 1);

	domain_devs = kzalloc_objs(*domain_devs, domain_count);
	if (!domain_devs)
		return -ENOMEM;

	domain_links = kzalloc_objs(*domain_links, link_count);
	if (!domain_links)
		return -ENOMEM;

	for (i = 0; i < domain_count; i++) {
		struct device *domain_dev;

		dev_name[0] = 'a' + i;
		domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
		if (IS_ERR_OR_NULL(domain_dev)) {
			err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
			goto err_detach;
		}

		domain_devs[i] = domain_dev;
	}

	for (i = 0; i < domain_count; i++) {
		struct device_link *link;

		link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	for (i = domain_count; i < link_count; i++) {
		struct device_link *link;

		link = device_link_add(domain_devs[i - domain_count + 1],
				       domain_devs[i - domain_count],
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
		if (!link) {
			err = -ENODEV;
			goto err_unlink;
		}

		domain_links[i] = link;
	}

	pvr_dev->power = (struct pvr_device_power){
		.domain_devs = no_free_ptr(domain_devs),
		.domain_links = no_free_ptr(domain_links),
		.domain_count = domain_count,
	};

	return 0;

err_unlink:
	while (--i >= 0)
		device_link_del(domain_links[i]);

	i = domain_count;

err_detach:
	while (--i >= 0)
		dev_pm_domain_detach(domain_devs[i], true);

	return err;
}

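/**
 * pvr_power_domains_fini() - Undo pvr_power_domains_init()
 * @pvr_dev: Target PowerVR device.
 *
 * Deletes the device links and detaches the power domains created by
 * pvr_power_domains_init(), in reverse creation order.
 */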
void pvr_power_domains_fini(struct pvr_device *pvr_dev)
{
	const int domain_count = pvr_dev->power.domain_count;

	int i = domain_count + (domain_count - 1);

	while (--i >= 0)
		device_link_del(pvr_dev->power.domain_links[i]);

	i = domain_count;

	while (--i >= 0)
		dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);

	kfree(pvr_dev->power.domain_links);
	kfree(pvr_dev->power.domain_devs);

	pvr_dev->power = (struct pvr_device_power){ 0 };
}