xref: /linux/drivers/gpu/drm/panthor/panthor_pwr.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2025 ARM Limited. All rights reserved. */

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/cleanup.h>
#include <linux/iopoll.h>
#include <linux/wait.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_device.h"
#include "panthor_hw.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"

#define PWR_INTERRUPTS_MASK \
	(PWR_IRQ_POWER_CHANGED_SINGLE | \
	 PWR_IRQ_POWER_CHANGED_ALL | \
	 PWR_IRQ_DELEGATION_CHANGED | \
	 PWR_IRQ_RESET_COMPLETED | \
	 PWR_IRQ_RETRACT_COMPLETED | \
	 PWR_IRQ_INSPECT_COMPLETED | \
	 PWR_IRQ_COMMAND_NOT_ALLOWED | \
	 PWR_IRQ_COMMAND_INVALID)

#define PWR_ALL_CORES_MASK		GENMASK_U64(63, 0)

#define PWR_DOMAIN_MAX_BITS		16

#define PWR_TRANSITION_TIMEOUT_US	(2ULL * USEC_PER_SEC)

#define PWR_RETRACT_TIMEOUT_US		(2ULL * USEC_PER_MSEC)

#define PWR_RESET_TIMEOUT_MS		500

/**
 * struct panthor_pwr - PWR_CONTROL block management data.
 */
struct panthor_pwr {
	/** @irq: PWR irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending PWR requests. */
	u32 pending_reqs;

	/** @reqs_acked: PWR request wait queue. */
	wait_queue_head_t reqs_acked;
};

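/*
 * Interrupt handler for the PWR_CONTROL block: acknowledge the raised
 * interrupts, report invalid/not-allowed command errors and wake up any
 * waiter whose pending request was just completed.
 */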
static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status)
{
	spin_lock(&ptdev->pwr->reqs_lock);
	gpu_write(ptdev, PWR_INT_CLEAR, status);

	if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED))
		drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED");

	if (unlikely(status & PWR_IRQ_COMMAND_INVALID))
		drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID");

	if (status & ptdev->pwr->pending_reqs) {
		ptdev->pwr->pending_reqs &= ~status;
		wake_up_all(&ptdev->pwr->reqs_acked);
	}
	spin_unlock(&ptdev->pwr->reqs_lock);
}
PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler);

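/* Issue a PWR command, setting PWR_CMDARG first when @args is non-zero. */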
static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args)
{
	if (args)
		gpu_write64(ptdev, PWR_CMDARG, args);

	gpu_write(ptdev, PWR_COMMAND, command);
}

static bool reset_irq_raised(struct panthor_device *ptdev)
{
	return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED;
}

static bool reset_pending(struct panthor_device *ptdev)
{
	return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED);
}

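/**
 * panthor_pwr_reset() - Issue a reset command and wait for its completion.
 * @ptdev: Device.
 * @reset_cmd: Reset command to issue.
 *
 * The RESET_COMPLETED interrupt is latched as a pending request before the
 * command is written, and the function then sleeps until the IRQ handler
 * acknowledges it or PWR_RESET_TIMEOUT_MS expires.
 *
 * Return: 0 on success, -ETIMEDOUT if the reset did not complete in time.
 */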
static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd)
{
	scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) {
		if (reset_pending(ptdev)) {
			drm_WARN(&ptdev->base, 1, "Reset already pending");
		} else {
			ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED;
			gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED);
			panthor_pwr_write_command(ptdev, reset_cmd, 0);
		}
	}

	if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev),
				msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) {
		guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock);

		if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) {
			drm_err(&ptdev->base, "RESET timed out (0x%x)", reset_cmd);
			return -ETIMEDOUT;
		}

		ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED;
	}

	return 0;
}

static const char *get_domain_name(u8 domain)
{
	switch (domain) {
	case PWR_COMMAND_DOMAIN_L2:
		return "L2";
	case PWR_COMMAND_DOMAIN_TILER:
		return "Tiler";
	case PWR_COMMAND_DOMAIN_SHADER:
		return "Shader";
	case PWR_COMMAND_DOMAIN_BASE:
		return "Base";
	case PWR_COMMAND_DOMAIN_STACK:
		return "Stack";
	}
	return "Unknown";
}

static u32 get_domain_base(u8 domain)
{
	switch (domain) {
	case PWR_COMMAND_DOMAIN_L2:
		return PWR_L2_PRESENT;
	case PWR_COMMAND_DOMAIN_TILER:
		return PWR_TILER_PRESENT;
	case PWR_COMMAND_DOMAIN_SHADER:
		return PWR_SHADER_PRESENT;
	case PWR_COMMAND_DOMAIN_BASE:
		return PWR_BASE_PRESENT;
	case PWR_COMMAND_DOMAIN_STACK:
		return PWR_STACK_PRESENT;
	}
	return 0;
}

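/*
 * The PRESENT/READY/PWRTRANS registers of every power domain share the same
 * relative layout, so the spacing of the L2 registers is reused to derive the
 * READY and PWRTRANS offsets of any domain from its PRESENT register.
 */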
static u32 get_domain_ready_reg(u32 domain)
{
	return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT);
}

static u32 get_domain_pwrtrans_reg(u32 domain)
{
	return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT);
}

static bool is_valid_domain(u32 domain)
{
	return get_domain_base(domain) != 0;
}

static bool has_rtu(struct panthor_device *ptdev)
{
	return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL;
}

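/*
 * The shader domain includes the RTU subdomain on GPUs that advertise
 * GPU_FEATURES_RAY_TRAVERSAL; other domains have no subdomain.
 */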
static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain)
{
	if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev))
		return PWR_COMMAND_SUBDOMAIN_RTU;

	return 0;
}

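/* Poll the domain PWRTRANS register until no power transition is in flight or @timeout_us expires. */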
static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain,
					      u32 timeout_us)
{
	u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain);
	u64 val;
	int ret = 0;

	ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, !(PWR_ALL_CORES_MASK & val), 100,
				      timeout_us);
	if (ret) {
		drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)",
			get_domain_name(domain), val);
		return ret;
	}

	return 0;
}

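/* Dump the PWR block registers to the kernel log to help diagnose a failed power transition. */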
static void panthor_pwr_debug_info_show(struct panthor_device *ptdev)
{
	drm_info(&ptdev->base, "GPU_FEATURES:    0x%016llx", gpu_read64(ptdev, GPU_FEATURES));
	drm_info(&ptdev->base, "PWR_STATUS:      0x%016llx", gpu_read64(ptdev, PWR_STATUS));
	drm_info(&ptdev->base, "L2_PRESENT:      0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT));
	drm_info(&ptdev->base, "L2_PWRTRANS:     0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS));
	drm_info(&ptdev->base, "L2_READY:        0x%016llx", gpu_read64(ptdev, PWR_L2_READY));
	drm_info(&ptdev->base, "TILER_PRESENT:   0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT));
	drm_info(&ptdev->base, "TILER_PWRTRANS:  0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS));
	drm_info(&ptdev->base, "TILER_READY:     0x%016llx", gpu_read64(ptdev, PWR_TILER_READY));
	drm_info(&ptdev->base, "SHADER_PRESENT:  0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT));
	drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS));
	drm_info(&ptdev->base, "SHADER_READY:    0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY));
}

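/**
 * panthor_pwr_domain_transition() - Power a set of cores in a domain up or down.
 * @ptdev: Device.
 * @cmd: PWR_COMMAND_POWER_UP or PWR_COMMAND_POWER_DOWN.
 * @domain: Domain to transition.
 * @mask: Bitmask of cores to transition within the domain.
 * @timeout_us: Timeout, in microseconds, for the transition to complete.
 *
 * Waits for any in-flight transition to settle, returns early if the domain
 * is already in the requested state, then issues the command and polls the
 * domain READY register until @mask reaches the expected value.
 *
 * Return: 0 on success, a negative error code on failure.
 */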
static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain,
					 u64 mask, u32 timeout_us)
{
	u32 ready_reg = get_domain_ready_reg(domain);
	u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain));
	u64 expected_val = 0;
	u64 val;
	int ret = 0;

	if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain)))
		return -EINVAL;

	switch (cmd) {
	case PWR_COMMAND_POWER_DOWN:
		expected_val = 0;
		break;
	case PWR_COMMAND_POWER_UP:
		expected_val = mask;
		break;
	default:
		drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd);
		return -EINVAL;
	}

	ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us);
	if (ret)
		return ret;

	/* domain already in target state, return early */
	if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val)
		return 0;

	panthor_pwr_write_command(ptdev, pwr_cmd, mask);

	ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100,
				      timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)",
			get_domain_name(domain), pwr_cmd, mask);
		panthor_pwr_debug_info_show(ptdev);
		return ret;
	}

	return 0;
}

#define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us)            \
	panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \
				      __timeout_us)

#define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \
	panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us)

/**
 * retract_domain() - Retract control of a domain from the MCU
 * @ptdev: Device.
 * @domain: Domain to retract control of.
 *
 * Retracting the L2 domain is not expected since it won't be delegated.
 *
 * Return:
 * *       0 on success or if the domain is already retracted.
 * *       -EPERM if domain is L2.
 * *       A negative error code otherwise.
 */
static int retract_domain(struct panthor_device *ptdev, u32 domain)
{
	const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0);
	const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
	const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
	const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
	u64 val;
	int ret;

	if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
		return -EPERM;

	ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val),
				      0, PWR_RETRACT_TIMEOUT_US);
	if (ret) {
		drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain));
		return ret;
	}

	if (!(pwr_status & delegated_mask)) {
		drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain));
		return 0;
	}

	panthor_pwr_write_command(ptdev, pwr_cmd, 0);

	/*
	 * On successful retraction, the allow flag is set and the delegated
	 * flag is cleared.
	 */
	ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
				      ((delegated_mask | allow_mask) & val) == allow_mask, 10,
				      PWR_TRANSITION_TIMEOUT_US);
	if (ret) {
		drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)",
			get_domain_name(domain), pwr_cmd);
		return ret;
	}

	return 0;
}

/**
 * delegate_domain() - Delegate control of a domain to the MCU
 * @ptdev: Device.
 * @domain: Domain to delegate control of.
 *
 * Delegating the L2 domain is prohibited.
 *
 * Return:
 * *       0 on success or if the domain is already delegated.
 * *       -EPERM if domain is L2.
 * *       A negative error code otherwise.
 */
static int delegate_domain(struct panthor_device *ptdev, u32 domain)
{
	const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0);
	const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
	const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
	const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
	u64 val;
	int ret;

	if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
		return -EPERM;

	/* Already delegated, exit early */
	if (pwr_status & delegated_mask)
		return 0;

	/* Check if the command is allowed before delegating. */
	if (!(pwr_status & allow_mask)) {
		drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain));
		return -EPERM;
	}

	ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US);
	if (ret)
		return ret;

	panthor_pwr_write_command(ptdev, pwr_cmd, 0);

	/*
	 * On successful delegation, the allow flag is cleared and the
	 * delegated flag is set.
	 */
	ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
				      ((delegated_mask | allow_mask) & val) == delegated_mask,
				      10, PWR_TRANSITION_TIMEOUT_US);
	if (ret) {
		drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)",
			get_domain_name(domain), pwr_cmd);
		return ret;
	}

	return 0;
}

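/*
 * Delegate the shader and tiler domains to the MCU. If delegating the tiler
 * fails, the shader domain is retracted so both domains stay under host
 * control.
 */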
static int panthor_pwr_delegate_domains(struct panthor_device *ptdev)
{
	int ret;

	if (!ptdev->pwr)
		return 0;

	ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);
	if (ret)
		return ret;

	ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER);
	if (ret)
		goto err_retract_shader;

	return 0;

err_retract_shader:
	retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);

	return ret;
}

/**
 * panthor_pwr_domain_force_off() - Forcefully power down a domain.
 * @ptdev: Device.
 * @domain: Domain to forcefully power down.
 *
 * This function will attempt to retract and power off the requested power
 * domain. However, if retraction fails, the operation is aborted. If power off
 * fails, the domain will remain retracted and under host control.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain)
{
	const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain));
	int ret;

	/* Domain already powered down, early exit. */
	if (!domain_ready)
		return 0;

	/* Domain has to be in host control to issue power off command. */
	ret = retract_domain(ptdev, domain);
	if (ret)
		return ret;

	return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US);
}

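/**
 * panthor_pwr_unplug() - Tear down PWR management on device unplug.
 * @ptdev: Device.
 *
 * Suspends the PWR IRQ so the handler can no longer run, then clears all
 * pending requests and wakes up any waiter so nothing stays blocked.
 */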
void panthor_pwr_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	if (!ptdev->pwr)
		return;

	/* Make sure the IRQ handler is not running after that point. */
	panthor_pwr_irq_suspend(&ptdev->pwr->irq);

	/* Wake-up all waiters. */
	spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags);
	ptdev->pwr->pending_reqs = 0;
	wake_up_all(&ptdev->pwr->reqs_acked);
	spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags);
}

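/**
 * panthor_pwr_init() - Initialize the PWR_CONTROL management data.
 * @ptdev: Device.
 *
 * This is a no-op on hardware without a PWR_CONTROL block. Otherwise it
 * allocates the drm-managed &struct panthor_pwr and requests the "gpu"
 * interrupt line for PWR interrupts.
 *
 * Return: 0 on success, a negative error code on failure.
 */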
int panthor_pwr_init(struct panthor_device *ptdev)
{
	struct panthor_pwr *pwr;
	int err, irq;

	if (!panthor_hw_has_pwr_ctrl(ptdev))
		return 0;

	pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL);
	if (!pwr)
		return -ENOMEM;

	spin_lock_init(&pwr->reqs_lock);
	init_waitqueue_head(&pwr->reqs_acked);
	ptdev->pwr = pwr;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK);
	if (err)
		return err;

	return 0;
}

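/**
 * panthor_pwr_reset_soft() - Issue a soft reset through the PWR block.
 * @ptdev: Device.
 *
 * Return: 0 on success, -EOPNOTSUPP if PWR_STATUS reports that RESET_SOFT is
 * not allowed, or a negative error code if the reset times out.
 */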
int panthor_pwr_reset_soft(struct panthor_device *ptdev)
{
	if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) {
		drm_err(&ptdev->base, "RESET_SOFT not allowed");
		return -EOPNOTSUPP;
	}

	return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT);
}

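/**
 * panthor_pwr_l2_power_off() - Power down the L2 domain.
 * @ptdev: Device.
 *
 * The tiler and shader domains are forcefully powered off first, since they
 * must be down and back under host control before the L2 can be turned off.
 */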
void panthor_pwr_l2_power_off(struct panthor_device *ptdev)
{
	const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
	const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);

	/* Abort if L2 power off constraints are not satisfied */
	if (!(pwr_status & l2_allow_mask)) {
		drm_warn(&ptdev->base, "Power off L2 domain not allowed");
		return;
	}

	/* It is expected that when halting the MCU, it would power down its
	 * delegated domains. However, an unresponsive or hung MCU may not do
	 * so, which is why we need to check and retract the domains back into
	 * host control to be powered down in the right order before powering
	 * down the L2.
	 */
	if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER))
		return;

	if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER))
		return;

	panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
				     PWR_TRANSITION_TIMEOUT_US);
}

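/**
 * panthor_pwr_l2_power_on() - Power up the L2 domain.
 * @ptdev: Device.
 *
 * Once the L2 is up, the shader and tiler domains are delegated to the MCU so
 * it can manage their power state itself.
 *
 * Return: 0 on success, a negative error code on failure.
 */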
int panthor_pwr_l2_power_on(struct panthor_device *ptdev)
{
	const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
	const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
	int ret;

	if ((pwr_status & l2_allow_mask) == 0) {
		drm_warn(&ptdev->base, "Power on L2 domain not allowed");
		return -EPERM;
	}

	ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
					  PWR_TRANSITION_TIMEOUT_US);
	if (ret)
		return ret;

	/* Delegate control of the shader and tiler power domains to the MCU as
	 * it can better manage which shader/tiler cores need to be powered up
	 * or can be powered down based on currently running jobs.
	 *
	 * If the shader and tiler domains are already delegated to the MCU,
	 * this call would just return early.
	 */
	return panthor_pwr_delegate_domains(ptdev);
}

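/* Suspend PWR interrupt handling; a no-op on hardware without a PWR block. */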
void panthor_pwr_suspend(struct panthor_device *ptdev)
{
	if (!ptdev->pwr)
		return;

	panthor_pwr_irq_suspend(&ptdev->pwr->irq);
}

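/* Re-enable PWR interrupt handling with the full PWR interrupt mask. */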
void panthor_pwr_resume(struct panthor_device *ptdev)
{
	if (!ptdev->pwr)
		return;

	panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK);
}