// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_device_info.h"

#include "pvr_fw.h"
#include "pvr_params.h"
#include "pvr_power.h"
#include "pvr_queue.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_stream.h"
#include "pvr_vm.h"

#include <drm/drm_print.h>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Major number for the supported version of the firmware. */
#define PVR_FW_VERSION_MAJOR 1

/**
 * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
 * control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->regs.
 *
 * This method of mapping the device control registers into memory ensures that
 * they are unmapped when the driver is detached (i.e. no explicit cleanup is
 * required).
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by devm_platform_get_and_ioremap_resource().
 */
static int
pvr_device_reg_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
	struct resource *regs_resource;
	void __iomem *regs;

	pvr_dev->regs_resource = NULL;
	pvr_dev->regs = NULL;

	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
	if (IS_ERR(regs))
		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
				     "failed to ioremap gpu registers\n");

	pvr_dev->regs = regs;
	pvr_dev->regs_resource = regs_resource;

	return 0;
}

/**
 * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
 * struct pvr_device->mem_clk.
 *
 * Three clocks are used by the PowerVR device: core, sys and mem. Only the
 * core clock is mandatory; sys and mem are requested as optional clocks. On
 * return, this function guarantees that the clocks are in one of the
 * following states:
 *
 *  * All successfully initialized,
 *  * Core errored, sys and mem uninitialized,
 *  * Core deinitialized, sys errored, mem uninitialized, or
 *  * Core and sys deinitialized, mem errored.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by devm_clk_get(), or
 *  * Any error returned by devm_clk_get_optional().
 */
static int pvr_device_clk_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct clk *core_clk;
	struct clk *sys_clk;
	struct clk *mem_clk;

	core_clk = devm_clk_get(drm_dev->dev, "core");
	if (IS_ERR(core_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
				     "failed to get core clock\n");

	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
	if (IS_ERR(sys_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
				     "failed to get sys clock\n");

	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
	if (IS_ERR(mem_clk))
		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
				     "failed to get mem clock\n");

	pvr_dev->core_clk = core_clk;
	pvr_dev->sys_clk = sys_clk;
	pvr_dev->mem_clk = mem_clk;

	return 0;
}

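/*
 * Illustrative only: the clock names requested above correspond to entries in
 * the "clock-names" property of the GPU's devicetree node. A minimal sketch
 * (not copied from a real binding document) with only the mandatory core
 * clock might look like:
 *
 *	gpu@fd00000 {
 *		compatible = "...";
 *		clocks = <&gpu_clk>;
 *		clock-names = "core";
 *	};
 *
 * The optional "sys" and "mem" clocks are appended to both properties when
 * the platform provides them.
 */
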
/**
 * pvr_device_process_active_queues() - Process all queue-related events.
 * @pvr_dev: PowerVR device to check
 *
 * This is called any time we receive a FW event. It iterates over all
 * active queues and calls pvr_queue_process() on them.
 */
static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
{
	struct pvr_queue *queue, *tmp_queue;
	LIST_HEAD(active_queues);

	mutex_lock(&pvr_dev->queues.lock);

	/* Move all active queues to a temporary list. Queues that remain
	 * active after we're done processing them are re-inserted into
	 * the queues.active list by pvr_queue_process().
	 */
	list_splice_init(&pvr_dev->queues.active, &active_queues);

	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
		pvr_queue_process(queue);

	mutex_unlock(&pvr_dev->queues.lock);
}

static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
{
	u32 events;

	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);

	return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
}

static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
{
	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
		       ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
}

static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u32 events;

	WARN_ON_ONCE(!pvr_dev->has_safety_events);

	events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);

	/* Handle only these events on the host and leave the rest to the FW. */
	events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
		ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;

	pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
		u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);

		pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);

		drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
	}

	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
		/*
		 * The watchdog timer is disabled by the driver, so this event
		 * should never fire.
		 */
		drm_info(drm_dev, "Safety event: Watchdog timeout\n");
	}
}

static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	irqreturn_t ret = IRQ_NONE;

	/* We are in the threaded handler, so we can keep dequeuing events until
	 * we don't see any. This should allow us to reduce the number of
	 * interrupts when the GPU is receiving a large number of short jobs.
	 */
	while (pvr_fw_irq_pending(pvr_dev)) {
		pvr_fw_irq_clear(pvr_dev);

		if (pvr_dev->fw_dev.booted) {
			pvr_fwccb_process(pvr_dev);
			pvr_kccb_wake_up_waiters(pvr_dev);
			pvr_device_process_active_queues(pvr_dev);
		}

		pm_runtime_mark_last_busy(drm_dev->dev);

		ret = IRQ_HANDLED;
	}

	if (pvr_dev->has_safety_events) {
		int err;

		/*
		 * Ensure the GPU is powered on since some safety events (such
		 * as ECC faults) can happen outside of job submissions, which
		 * are otherwise the only time a power reference is held.
		 */
		err = pvr_power_get(pvr_dev);
		if (err) {
			drm_err_ratelimited(drm_dev,
					    "%s: could not take power reference (%d)\n",
					    __func__, err);
			return ret;
		}

		while (pvr_device_safety_irq_pending(pvr_dev)) {
			pvr_device_safety_irq_clear(pvr_dev);
			pvr_device_handle_safety_events(pvr_dev);

			ret = IRQ_HANDLED;
		}

		pvr_power_put(pvr_dev);
	}

	return ret;
}

static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
	struct pvr_device *pvr_dev = data;
	bool safety_irq_pending = false;

	if (pvr_dev->has_safety_events)
		safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);

	if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
		return IRQ_NONE; /* Spurious IRQ - ignore. */

	return IRQ_WAKE_THREAD;
}

static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
{
	u32 num_ecc_rams = 0;

	/*
	 * Safety events are an optional feature of the RogueXE platform. They
	 * are only enabled if at least one of ECC memory or the watchdog timer
	 * is present in HW. While safety events can be generated by other
	 * systems, that will never happen if the above-mentioned hardware is
	 * not present.
	 */
	if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
		pvr_dev->has_safety_events = false;
		return;
	}

	PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);

	pvr_dev->has_safety_events =
		num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
}

/**
 * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by platform_get_irq(), or
 *  * Any error returned by request_threaded_irq().
 */
static int
pvr_device_irq_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);

	init_waitqueue_head(&pvr_dev->kccb.rtn_q);

	pvr_device_safety_irq_init(pvr_dev);

	pvr_dev->irq = platform_get_irq(plat_dev, 0);
	if (pvr_dev->irq < 0)
		return pvr_dev->irq;

	/* Clear any pending events before requesting the IRQ line. */
	pvr_fw_irq_clear(pvr_dev);

	if (pvr_dev->has_safety_events)
		pvr_device_safety_irq_clear(pvr_dev);

	/*
	 * The ONESHOT flag ensures IRQs are masked while the thread handler is
	 * running.
	 */
	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
				    pvr_device_irq_thread_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}

/**
 * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_irq_fini(struct pvr_device *pvr_dev)
{
	free_irq(pvr_dev->irq, pvr_dev);
}

/**
 * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
 * @pvr_dev: Target PowerVR device.
 * @base: First part of the filename.
 * @major: Major version number.
 *
 * A PowerVR firmware filename consists of three parts separated by underscores
 * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
 * of @base, the second part is the hardware version string derived from
 * @pvr_dev and the final part is the firmware version number constructed from
 * @major with a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
 *
 * The returned string will have been slab allocated and must be freed with
 * kfree().
 *
 * Return:
 *  * The constructed filename on success, or
 *  * %NULL if the allocation in kasprintf() fails.
 */
static char *
pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
			    u8 major)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;

	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
			 gpu_id->v, gpu_id->n, gpu_id->c, major);
}

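/*
 * For example, with PVR_FW_VERSION_MAJOR == 1, a (hypothetical) GPU reporting
 * BVNC 33.15.11.3 and a @base of "powervr/rogue" yields
 * "powervr/rogue_33.15.11.3_v1.fw", which is the path later passed to
 * request_firmware().
 */
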
static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}

/**
 * pvr_request_firmware() - Load firmware for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * See pvr_build_firmware_filename() for details on firmware file naming.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if pvr_build_firmware_filename() fails,
 *  * Any error returned by request_firmware(), or
 *  * Any error returned by devm_add_action_or_reset().
 */
static int
pvr_request_firmware(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = &pvr_dev->base;
	char *filename;
	const struct firmware *fw;
	int err;

	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
					       PVR_FW_VERSION_MAJOR);
	if (!filename)
		return -ENOMEM;

	/*
	 * request_firmware() takes its own copy of the filename string,
	 * meaning we can free our instance before returning.
	 */
	err = request_firmware(&fw, filename, pvr_dev->base.dev);
	if (err) {
		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
			filename, err);
		goto err_free_filename;
	}

	drm_info(drm_dev, "loaded firmware %s\n", filename);
	kfree(filename);

	pvr_dev->fw_dev.firmware = fw;

	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);

err_free_filename:
	kfree(filename);

	return err;
}

/**
 * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
 * @pvr_dev: Target PowerVR device.
 *
 * Sets struct pvr_device->gpu_id.
 */
static void
pvr_load_gpu_id(struct pvr_device *pvr_dev)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
	u64 bvnc;

	/*
	 * Try reading the BVNC using the newer (cleaner) method first. If the
	 * B value is zero, fall back to the older method.
	 */
	bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);

	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
	if (gpu_id->b != 0) {
		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
	} else {
		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);

		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
	}
}

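/*
 * Worked example (illustrative values; the exact field layout is defined in
 * pvr_rogue_cr_defs.h): assuming the PBVNC register packs B, V, N and C into
 * four consecutive 16-bit fields from most to least significant, a raw value
 * of 0x0021000f000b0003 decodes to b = 33, v = 15, n = 11 and c = 3, i.e.
 * GPU ID 33.15.11.3.
 */
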
/**
 * pvr_set_dma_info() - Set PowerVR device DMA information
 * @pvr_dev: Target PowerVR device.
 *
 * Sets the DMA mask and max segment size for the PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by PVR_FEATURE_VALUE(), or
 *  * Any error returned by dma_set_mask().
 */
static int
pvr_set_dma_info(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u16 phys_bus_width;
	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
	if (err) {
		drm_err(drm_dev, "Failed to get device physical bus width\n");
		return err;
	}

	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
	if (err) {
		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
		return err;
	}

	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);

	return 0;
}

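/*
 * For instance, a reported phys_bus_width of 40 produces
 * dma_set_mask(dev, DMA_BIT_MASK(40)), i.e. a 40-bit (0xffffffffff) DMA mask,
 * while the UINT_MAX max segment size effectively lifts any segment-size
 * restriction for this device.
 */
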
/**
 * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * The following steps are taken to ensure the device is ready:
 *
 *  1. Read the hardware version information from control registers,
 *  2. Load the firmware for the device,
 *  3. Validate the firmware and initialise the hardware feature information,
 *  4. Set up the device DMA information,
 *  5. Set up the device-scoped memory context, and
 *  6. Initialise the firmware subsystem.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the firmware processor type is not recognised,
 *  * Any error returned by pvr_request_firmware(),
 *  * Any error returned by pvr_fw_validate_init_device_info(),
 *  * Any error returned by pvr_set_dma_info(),
 *  * Any error returned by pvr_vm_create_context(), or
 *  * Any error returned by pvr_fw_init().
 */
static int
pvr_device_gpu_init(struct pvr_device *pvr_dev)
{
	int err;

	pvr_load_gpu_id(pvr_dev);

	err = pvr_request_firmware(pvr_dev);
	if (err)
		return err;

	err = pvr_fw_validate_init_device_info(pvr_dev);
	if (err)
		return err;

	if (PVR_HAS_FEATURE(pvr_dev, meta))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
	else if (PVR_HAS_FEATURE(pvr_dev, mips))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
	else
		return -EINVAL;

	pvr_stream_create_musthave_masks(pvr_dev);

	err = pvr_set_dma_info(pvr_dev);
	if (err)
		return err;

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
		if (IS_ERR(pvr_dev->kernel_vm_ctx))
			return PTR_ERR(pvr_dev->kernel_vm_ctx);
	}

	err = pvr_fw_init(pvr_dev);
	if (err)
		goto err_vm_ctx_put;

	return 0;

err_vm_ctx_put:
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
		pvr_dev->kernel_vm_ctx = NULL;
	}

	return err;
}

/**
 * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
static void
pvr_device_gpu_fini(struct pvr_device *pvr_dev)
{
	pvr_fw_fini(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
		pvr_dev->kernel_vm_ctx = NULL;
	}
}

/**
 * pvr_device_init() - Initialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * If this function returns successfully, the device will have been fully
 * initialized. Otherwise, any parts of the device initialized before an error
 * occurs will be de-initialized before returning.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by pvr_device_params_init(),
 *  * Any error returned by pvr_device_clk_init(),
 *  * Any error returned by the platform-specific power ops init() hook,
 *  * Any error returned by pm_runtime_resume_and_get(),
 *  * Any error returned by pvr_device_reg_init(),
 *  * Any error returned by pvr_device_gpu_init(), or
 *  * Any error returned by pvr_device_irq_init().
 */
int
pvr_device_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct device *dev = drm_dev->dev;
	int err;

	/* Get the platform-specific data based on the compatible string. */
	pvr_dev->device_data = of_device_get_match_data(dev);

	/*
	 * Set up device parameters. We do this first in case other steps
	 * depend on them.
	 */
	err = pvr_device_params_init(&pvr_dev->params);
	if (err)
		return err;

	/* Enable and initialize clocks required for the device to operate. */
	err = pvr_device_clk_init(pvr_dev);
	if (err)
		return err;

	err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
	if (err)
		return err;

	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;

	/* Map the control registers into memory. */
	err = pvr_device_reg_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	/* Perform GPU-specific initialization steps. */
	err = pvr_device_gpu_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	err = pvr_device_irq_init(pvr_dev);
	if (err)
		goto err_device_gpu_fini;

	pm_runtime_put(dev);

	return 0;

err_device_gpu_fini:
	pvr_device_gpu_fini(pvr_dev);

err_pm_runtime_put:
	pm_runtime_put_sync_suspend(dev);

	return err;
}

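/*
 * A minimal sketch of the expected caller, for illustration only (the real
 * probe logic lives in pvr_drv.c; error handling and DRM registration details
 * are elided):
 *
 *	static int pvr_probe(struct platform_device *plat_dev)
 *	{
 *		struct pvr_device *pvr_dev;
 *		int err;
 *
 *		pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
 *					     struct pvr_device, base);
 *		if (IS_ERR(pvr_dev))
 *			return PTR_ERR(pvr_dev);
 *
 *		err = pvr_device_init(pvr_dev);
 *		if (err)
 *			return err;
 *
 *		return drm_dev_register(from_pvr_device(pvr_dev), 0);
 *	}
 */
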
/**
 * pvr_device_fini() - Deinitialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 */
void
pvr_device_fini(struct pvr_device *pvr_dev)
{
	/*
	 * Deinitialization stages are performed in reverse order compared to
	 * the initialization stages in pvr_device_init().
	 */
	pvr_device_irq_fini(pvr_dev);
	pvr_device_gpu_fini(pvr_dev);
}

/**
 * pvr_device_has_uapi_quirk() - Check whether a quirk exposed via the UAPI is
 * present on a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @quirk: Quirk number to look up.
 *
 * Return:
 *  * %true if the quirk is present on the device, or
 *  * %false otherwise.
 */
bool
pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
{
	switch (quirk) {
	case 47217:
		return PVR_HAS_QUIRK(pvr_dev, 47217);
	case 48545:
		return PVR_HAS_QUIRK(pvr_dev, 48545);
	case 49927:
		return PVR_HAS_QUIRK(pvr_dev, 49927);
	case 51764:
		return PVR_HAS_QUIRK(pvr_dev, 51764);
	case 62269:
		return PVR_HAS_QUIRK(pvr_dev, 62269);
	default:
		return false;
	}
}

/**
 * pvr_device_has_uapi_enhancement() - Check whether an enhancement exposed
 * via the UAPI is present on a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @enhancement: Enhancement number to look up.
 *
 * Return:
 *  * %true if the enhancement is present on the device, or
 *  * %false otherwise.
 */
bool
pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
{
	switch (enhancement) {
	case 35421:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
	case 42064:
		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
	default:
		return false;
	}
}

/**
 * pvr_device_has_feature() - Look up device feature based on feature definition
 * @pvr_dev: Device pointer.
 * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
 *
 * Return:
 *  * %true if feature is present on device, or
 *  * %false if feature is not present on device.
 */
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
{
	switch (feature) {
	case PVR_FEATURE_CLUSTER_GROUPING:
		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);

	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);

	case PVR_FEATURE_FB_CDC_V4:
		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);

	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);

	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);

	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);

	case PVR_FEATURE_TESSELLATION:
		return PVR_HAS_FEATURE(pvr_dev, tessellation);

	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);

	case PVR_FEATURE_VDM_DRAWINDIRECT:
		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);

	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);

	case PVR_FEATURE_ZLS_SUBTILE:
		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);

	/* Derived features. */
	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
		u8 cdm_control_stream_format = 0;

		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
	}

	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
			u8 fbcdc_algorithm = 0;

			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
		}
		return false;

	default:
		WARN(true, "Looking up undefined feature %u\n", feature);
		return false;
	}
}
774