xref: /linux/drivers/gpu/drm/imagination/pvr_device.c (revision a0c83177734ab98623795e1ba2cf4b72c23de5e7)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_device.h"
5 #include "pvr_device_info.h"
6 
7 #include "pvr_fw.h"
8 #include "pvr_power.h"
9 #include "pvr_queue.h"
10 #include "pvr_rogue_cr_defs.h"
11 #include "pvr_stream.h"
12 #include "pvr_vm.h"
13 
14 #include <drm/drm_print.h>
15 
16 #include <linux/bitfield.h>
17 #include <linux/clk.h>
18 #include <linux/compiler_attributes.h>
19 #include <linux/compiler_types.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/firmware.h>
23 #include <linux/gfp.h>
24 #include <linux/interrupt.h>
25 #include <linux/of.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/reset.h>
29 #include <linux/slab.h>
30 #include <linux/stddef.h>
31 #include <linux/types.h>
32 #include <linux/workqueue.h>
33 
34 #include <kunit/visibility.h>
35 
36 /* Major number for the supported version of the firmware. */
37 #define PVR_FW_VERSION_MAJOR 1
38 
39 /**
40  * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
41  * control registers.
42  * @pvr_dev: Target PowerVR device.
43  *
44  * Sets struct pvr_device->regs.
45  *
46  * This method of mapping the device control registers into memory ensures that
47  * they are unmapped when the driver is detached (i.e. no explicit cleanup is
48  * required).
49  *
50  * Return:
51  *  * 0 on success, or
52  *  * Any error returned by devm_platform_get_and_ioremap_resource().
53  */
54 static int
pvr_device_reg_init(struct pvr_device * pvr_dev)55 pvr_device_reg_init(struct pvr_device *pvr_dev)
56 {
57 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
58 	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
59 	struct resource *regs_resource;
60 	void __iomem *regs;
61 
62 	pvr_dev->regs_resource = NULL;
63 	pvr_dev->regs = NULL;
64 
65 	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
66 	if (IS_ERR(regs))
67 		return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
68 				     "failed to ioremap gpu registers\n");
69 
70 	pvr_dev->regs = regs;
71 	pvr_dev->regs_resource = regs_resource;
72 
73 	return 0;
74 }
75 
76 /**
77  * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
78  * @pvr_dev: Target PowerVR device.
79  *
80  * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
81  * struct pvr_device->mem_clk.
82  *
83  * Three clocks are required by the PowerVR device: core, sys and mem. On
84  * return, this function guarantees that the clocks are in one of the following
85  * states:
86  *
87  *  * All successfully initialized,
88  *  * Core errored, sys and mem uninitialized,
89  *  * Core deinitialized, sys errored, mem uninitialized, or
90  *  * Core and sys deinitialized, mem errored.
91  *
92  * Return:
93  *  * 0 on success,
94  *  * Any error returned by devm_clk_get(), or
95  *  * Any error returned by devm_clk_get_optional().
96  */
pvr_device_clk_init(struct pvr_device * pvr_dev)97 static int pvr_device_clk_init(struct pvr_device *pvr_dev)
98 {
99 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
100 	struct clk *core_clk;
101 	struct clk *sys_clk;
102 	struct clk *mem_clk;
103 
104 	core_clk = devm_clk_get(drm_dev->dev, "core");
105 	if (IS_ERR(core_clk))
106 		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
107 				     "failed to get core clock\n");
108 
109 	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
110 	if (IS_ERR(sys_clk))
111 		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
112 				     "failed to get sys clock\n");
113 
114 	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
115 	if (IS_ERR(mem_clk))
116 		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
117 				     "failed to get mem clock\n");
118 
119 	pvr_dev->core_clk = core_clk;
120 	pvr_dev->sys_clk = sys_clk;
121 	pvr_dev->mem_clk = mem_clk;
122 
123 	return 0;
124 }
125 
126 /**
127  * pvr_device_process_active_queues() - Process all queue related events.
128  * @pvr_dev: PowerVR device to check
129  *
130  * This is called any time we receive a FW event. It iterates over all
131  * active queues and calls pvr_queue_process() on them.
132  */
pvr_device_process_active_queues(struct pvr_device * pvr_dev)133 static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
134 {
135 	struct pvr_queue *queue, *tmp_queue;
136 	LIST_HEAD(active_queues);
137 
138 	mutex_lock(&pvr_dev->queues.lock);
139 
140 	/* Move all active queues to a temporary list. Queues that remain
141 	 * active after we're done processing them are re-inserted to
142 	 * the queues.active list by pvr_queue_process().
143 	 */
144 	list_splice_init(&pvr_dev->queues.active, &active_queues);
145 
146 	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
147 		pvr_queue_process(queue);
148 
149 	mutex_unlock(&pvr_dev->queues.lock);
150 }
151 
pvr_device_safety_irq_pending(struct pvr_device * pvr_dev)152 static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
153 {
154 	u32 events;
155 
156 	WARN_ON_ONCE(!pvr_dev->has_safety_events);
157 
158 	events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
159 
160 	return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
161 }
162 
pvr_device_safety_irq_clear(struct pvr_device * pvr_dev)163 static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
164 {
165 	WARN_ON_ONCE(!pvr_dev->has_safety_events);
166 
167 	pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
168 		       ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
169 }
170 
pvr_device_handle_safety_events(struct pvr_device * pvr_dev)171 static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
172 {
173 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
174 	u32 events;
175 
176 	WARN_ON_ONCE(!pvr_dev->has_safety_events);
177 
178 	events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
179 
180 	/* Handle only these events on the host and leave the rest to the FW. */
181 	events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
182 		ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
183 
184 	pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
185 
186 	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
187 		u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
188 
189 		pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
190 
191 		drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
192 	}
193 
194 	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
195 		/*
196 		 * The watchdog timer is disabled by the driver so this event
197 		 * should never be fired.
198 		 */
199 		drm_info(drm_dev, "Safety event: Watchdog timeout\n");
200 	}
201 }
202 
pvr_device_irq_thread_handler(int irq,void * data)203 static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
204 {
205 	struct pvr_device *pvr_dev = data;
206 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
207 	irqreturn_t ret = IRQ_NONE;
208 
209 	/* We are in the threaded handler, we can keep dequeuing events until we
210 	 * don't see any. This should allow us to reduce the number of interrupts
211 	 * when the GPU is receiving a massive amount of short jobs.
212 	 */
213 	while (pvr_fw_irq_pending(pvr_dev)) {
214 		pvr_fw_irq_clear(pvr_dev);
215 
216 		if (pvr_dev->fw_dev.booted) {
217 			pvr_fwccb_process(pvr_dev);
218 			pvr_kccb_wake_up_waiters(pvr_dev);
219 			pvr_device_process_active_queues(pvr_dev);
220 		}
221 
222 		pm_runtime_mark_last_busy(drm_dev->dev);
223 
224 		ret = IRQ_HANDLED;
225 	}
226 
227 	if (pvr_dev->has_safety_events) {
228 		while (pvr_device_safety_irq_pending(pvr_dev)) {
229 			pvr_device_safety_irq_clear(pvr_dev);
230 			pvr_device_handle_safety_events(pvr_dev);
231 
232 			ret = IRQ_HANDLED;
233 		}
234 	}
235 
236 	return ret;
237 }
238 
pvr_device_irq_handler(int irq,void * data)239 static irqreturn_t pvr_device_irq_handler(int irq, void *data)
240 {
241 	struct pvr_device *pvr_dev = data;
242 	bool safety_irq_pending = false;
243 
244 	if (pvr_dev->has_safety_events)
245 		safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
246 
247 	if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
248 		return IRQ_NONE; /* Spurious IRQ - ignore. */
249 
250 	return IRQ_WAKE_THREAD;
251 }
252 
pvr_device_safety_irq_init(struct pvr_device * pvr_dev)253 static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
254 {
255 	u32 num_ecc_rams = 0;
256 
257 	/*
258 	 * Safety events are an optional feature of the RogueXE platform. They
259 	 * are only enabled if at least one of ECC memory or the watchdog timer
260 	 * are present in HW. While safety events can be generated by other
261 	 * systems, that will never happen if the above mentioned hardware is
262 	 * not present.
263 	 */
264 	if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
265 		pvr_dev->has_safety_events = false;
266 		return;
267 	}
268 
269 	PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
270 
271 	pvr_dev->has_safety_events =
272 		num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
273 }
274 
/**
 * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by platform_get_irq(), or
 *  * Any error returned by request_threaded_irq().
 */
static int
pvr_device_irq_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);

	init_waitqueue_head(&pvr_dev->kccb.rtn_q);

	/* Must run before the IRQ is requested: decides has_safety_events. */
	pvr_device_safety_irq_init(pvr_dev);

	pvr_dev->irq = platform_get_irq(plat_dev, 0);
	if (pvr_dev->irq < 0)
		return pvr_dev->irq;

	/* Clear any pending events before requesting the IRQ line. */
	pvr_fw_irq_clear(pvr_dev);

	if (pvr_dev->has_safety_events)
		pvr_device_safety_irq_clear(pvr_dev);

	/*
	 * The ONESHOT flag ensures IRQs are masked while the thread handler is
	 * running.
	 */
	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
				    pvr_device_irq_thread_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
312 
/**
 * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Releases the IRQ line requested in pvr_device_irq_init().
 */
static void
pvr_device_irq_fini(struct pvr_device *pvr_dev)
{
	free_irq(pvr_dev->irq, pvr_dev);
}
322 
/**
 * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
 * @pvr_dev: Target PowerVR device.
 * @base: First part of the filename.
 * @major: Major version number.
 *
 * A PowerVR firmware filename consists of three parts separated by underscores
 * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
 * of @base, the second part is the hardware version string derived from
 * @pvr_dev and the final part is the firmware version number constructed from
 * @major with a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
 *
 * The returned string will have been slab allocated and must be freed with
 * kfree().
 *
 * Return:
 *  * The constructed filename on success, or
 *  * %NULL if allocation fails (as returned by kasprintf()).
 */
static char *
pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
			    u8 major)
{
	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;

	/* The hardware version string is "B.V.N.C" from the decoded GPU ID. */
	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
			 gpu_id->v, gpu_id->n, gpu_id->c, major);
}
351 
/* devm action callback: drop the firmware reference taken in
 * pvr_request_firmware().
 */
static void
pvr_release_firmware(void *data)
{
	struct pvr_device *pvr_dev = data;

	release_firmware(pvr_dev->fw_dev.firmware);
}
359 
360 /**
361  * pvr_request_firmware() - Load firmware for a PowerVR device
362  * @pvr_dev: Target PowerVR device.
363  *
364  * See pvr_build_firmware_filename() for details on firmware file naming.
365  *
366  * Return:
367  *  * 0 on success,
368  *  * Any error returned by pvr_build_firmware_filename(), or
369  *  * Any error returned by request_firmware().
370  */
371 static int
pvr_request_firmware(struct pvr_device * pvr_dev)372 pvr_request_firmware(struct pvr_device *pvr_dev)
373 {
374 	struct drm_device *drm_dev = &pvr_dev->base;
375 	char *filename;
376 	const struct firmware *fw;
377 	int err;
378 
379 	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
380 					       PVR_FW_VERSION_MAJOR);
381 	if (!filename)
382 		return -ENOMEM;
383 
384 	/*
385 	 * This function takes a copy of &filename, meaning we can free our
386 	 * instance before returning.
387 	 */
388 	err = request_firmware(&fw, filename, pvr_dev->base.dev);
389 	if (err) {
390 		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
391 			filename, err);
392 		goto err_free_filename;
393 	}
394 
395 	drm_info(drm_dev, "loaded firmware %s\n", filename);
396 	kfree(filename);
397 
398 	pvr_dev->fw_dev.firmware = fw;
399 
400 	return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);
401 
402 err_free_filename:
403 	kfree(filename);
404 
405 	return err;
406 }
407 
/**
 * pvr_gpuid_decode_reg() - Decode the GPU ID from GPU register
 *
 * Sets the b, v, n, c fields of struct pvr_dev.gpu_id.
 *
 * @pvr_dev: Target PowerVR device.
 * @gpu_id: Output to be updated with the GPU ID.
 */
static void
pvr_gpuid_decode_reg(const struct pvr_device *pvr_dev, struct pvr_gpu_id *gpu_id)
{
	/*
	 * Try reading the BVNC using the newer (cleaner) method first. If the
	 * B value is zero, fall back to the older method.
	 */
	u64 bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);

	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
	if (gpu_id->b != 0) {
		/* Newer cores pack the full BVNC into CORE_ID__PBVNC. */
		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
	} else {
		/* Legacy layout: B/V from CORE_REVISION, N/C from CORE_ID. */
		u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
		u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);

		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
		/* N in the high byte, C in the low byte of the CONFIG field —
		 * presumably per the legacy register layout; confirm against
		 * pvr_rogue_cr_defs.h.
		 */
		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
	}
}
441 
442 /**
443  * pvr_gpuid_decode_string() - Decode the GPU ID from a module input string
444  *
445  * Sets the b, v, n, c fields of struct pvr_dev.gpu_id.
446  *
447  * @pvr_dev: Target PowerVR device.
448  * @param_bvnc: GPU ID (BVNC) module parameter.
449  * @gpu_id: Output to be updated with the GPU ID.
450  */
451 VISIBLE_IF_KUNIT int
pvr_gpuid_decode_string(const struct pvr_device * pvr_dev,const char * param_bvnc,struct pvr_gpu_id * gpu_id)452 pvr_gpuid_decode_string(const struct pvr_device *pvr_dev,
453 			const char *param_bvnc, struct pvr_gpu_id *gpu_id)
454 {
455 	const struct drm_device *drm_dev = &pvr_dev->base;
456 	char str_cpy[PVR_GPUID_STRING_MAX_LENGTH];
457 	char *pos, *tkn;
458 	int ret, idx = 0;
459 	u16 user_bvnc_u16[4];
460 	u8 dot_cnt = 0;
461 
462 	ret = strscpy(str_cpy, param_bvnc);
463 
464 	/*
465 	 * strscpy() should return at least a size 7 for the input to be valid.
466 	 * Returns -E2BIG for the case when the string is empty or too long.
467 	 */
468 	if (ret < PVR_GPUID_STRING_MIN_LENGTH) {
469 		drm_info(drm_dev,
470 			 "Invalid size of the input GPU ID (BVNC): %s",
471 			 str_cpy);
472 		return -EINVAL;
473 	}
474 
475 	while (*param_bvnc) {
476 		if (*param_bvnc == '.')
477 			dot_cnt++;
478 		param_bvnc++;
479 	}
480 
481 	if (dot_cnt != 3) {
482 		drm_info(drm_dev,
483 			 "Invalid format of the input GPU ID (BVNC): %s",
484 			 str_cpy);
485 		return -EINVAL;
486 	}
487 
488 	pos = str_cpy;
489 
490 	while ((tkn = strsep(&pos, ".")) != NULL && idx < 4) {
491 		/* kstrtou16() will also handle the case of consecutive dots */
492 		ret = kstrtou16(tkn, 10, &user_bvnc_u16[idx]);
493 		if (ret) {
494 			drm_info(drm_dev,
495 				 "Invalid format of the input GPU ID (BVNC): %s",
496 				 str_cpy);
497 			return -EINVAL;
498 		}
499 		idx++;
500 	}
501 
502 	gpu_id->b = user_bvnc_u16[0];
503 	gpu_id->v = user_bvnc_u16[1];
504 	gpu_id->n = user_bvnc_u16[2];
505 	gpu_id->c = user_bvnc_u16[3];
506 
507 	return 0;
508 }
509 EXPORT_SYMBOL_IF_KUNIT(pvr_gpuid_decode_string);
510 
/* Opt-in escape hatch for experimental/unknown GPU cores; consulted by
 * pvr_check_gpu_supported().
 */
static bool pvr_exp_hw_support;
module_param_named(exp_hw_support, pvr_exp_hw_support, bool, 0600);
MODULE_PARM_DESC(exp_hw_support, "Bypass runtime checks for fully supported GPU cores. WARNING: enabling this option may result in a buggy, insecure, or otherwise unusable driver.");
514 
/**
 * enum pvr_gpu_support_level - The level of support for a gpu_id in the current
 * version of the driver.
 *
 * @PVR_GPU_UNKNOWN: Cores that are unknown to the driver. These may not even exist.
 * @PVR_GPU_EXPERIMENTAL: Cores that have experimental support.
 * @PVR_GPU_SUPPORTED: Cores that are supported and maintained.
 *
 * See pvr_check_gpu_supported() for how each level is reported to the user.
 */
enum pvr_gpu_support_level {
	PVR_GPU_UNKNOWN,
	PVR_GPU_EXPERIMENTAL,
	PVR_GPU_SUPPORTED,
};
528 
529 static enum pvr_gpu_support_level
pvr_gpu_support_level(const struct pvr_gpu_id * gpu_id)530 pvr_gpu_support_level(const struct pvr_gpu_id *gpu_id)
531 {
532 	switch (pvr_gpu_id_to_packed_bvnc(gpu_id)) {
533 	case PVR_PACKED_BVNC(33, 15, 11, 3):
534 	case PVR_PACKED_BVNC(36, 53, 104, 796):
535 		return PVR_GPU_SUPPORTED;
536 
537 	case PVR_PACKED_BVNC(36, 52, 104, 182):
538 		return PVR_GPU_EXPERIMENTAL;
539 
540 	default:
541 		return PVR_GPU_UNKNOWN;
542 	}
543 }
544 
545 static int
pvr_check_gpu_supported(struct pvr_device * pvr_dev,const struct pvr_gpu_id * gpu_id)546 pvr_check_gpu_supported(struct pvr_device *pvr_dev,
547 			const struct pvr_gpu_id *gpu_id)
548 {
549 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
550 
551 	switch (pvr_gpu_support_level(gpu_id)) {
552 	case PVR_GPU_SUPPORTED:
553 		if (pvr_exp_hw_support)
554 			drm_info(drm_dev, "Module parameter 'exp_hw_support' was set, but this hardware is fully supported by the current driver.");
555 
556 		break;
557 
558 	case PVR_GPU_EXPERIMENTAL:
559 		if (!pvr_exp_hw_support) {
560 			drm_err(drm_dev, "Unsupported GPU! Set 'exp_hw_support' to bypass this check.");
561 			return -ENODEV;
562 		}
563 
564 		drm_warn(drm_dev, "Running on unsupported hardware; you may encounter bugs!");
565 		break;
566 
567 	/* NOTE: This code path may indicate misbehaving hardware. */
568 	case PVR_GPU_UNKNOWN:
569 	default:
570 		if (!pvr_exp_hw_support) {
571 			drm_err(drm_dev, "Unknown GPU! Set 'exp_hw_support' to bypass this check.");
572 			return -ENODEV;
573 		}
574 
575 		drm_warn(drm_dev, "Running on unknown hardware; expect issues.");
576 		break;
577 	}
578 
579 	return 0;
580 }
581 
/* Optional GPU ID (BVNC) override; when set, pvr_load_gpu_id() parses this
 * string instead of reading the ID registers.
 */
static char *pvr_gpuid_override;
module_param_named(gpuid, pvr_gpuid_override, charp, 0400);
MODULE_PARM_DESC(gpuid, "GPU ID (BVNC) to be used instead of the value read from hardware.");
585 
586 /**
587  * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control
588  * registers or input parameter. The input parameter is processed instead
589  * of the GPU register if provided.
590  *
591  * Sets the arch field of struct pvr_dev.gpu_id.
592  *
593  * @pvr_dev: Target PowerVR device.
594  */
595 static int
pvr_load_gpu_id(struct pvr_device * pvr_dev)596 pvr_load_gpu_id(struct pvr_device *pvr_dev)
597 {
598 	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
599 
600 	if (!pvr_gpuid_override || !pvr_gpuid_override[0]) {
601 		pvr_gpuid_decode_reg(pvr_dev, gpu_id);
602 	} else {
603 		drm_warn(from_pvr_device(pvr_dev),
604 			 "Using custom GPU ID (BVNC) provided by the user!");
605 
606 		int err = pvr_gpuid_decode_string(pvr_dev, pvr_gpuid_override,
607 						  gpu_id);
608 		if (err)
609 			return err;
610 	}
611 
612 	return pvr_check_gpu_supported(pvr_dev, gpu_id);
613 }
614 
615 /**
616  * pvr_set_dma_info() - Set PowerVR device DMA information
617  * @pvr_dev: Target PowerVR device.
618  *
619  * Sets the DMA mask and max segment size for the PowerVR device.
620  *
621  * Return:
622  *  * 0 on success,
623  *  * Any error returned by PVR_FEATURE_VALUE(), or
624  *  * Any error returned by dma_set_mask().
625  */
626 
627 static int
pvr_set_dma_info(struct pvr_device * pvr_dev)628 pvr_set_dma_info(struct pvr_device *pvr_dev)
629 {
630 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
631 	u16 phys_bus_width;
632 	int err;
633 
634 	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
635 	if (err) {
636 		drm_err(drm_dev, "Failed to get device physical bus width\n");
637 		return err;
638 	}
639 
640 	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
641 	if (err) {
642 		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
643 		return err;
644 	}
645 
646 	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
647 
648 	return 0;
649 }
650 
/**
 * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * The following steps are taken to ensure the device is ready:
 *
 *  1. Read the hardware version information from control registers,
 *  2. Initialise the hardware feature information,
 *  3. Setup the device DMA information,
 *  4. Setup the device-scoped memory context, and
 *  5. Load firmware into the device.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENODEV if the GPU is not supported,
 *  * Any error returned by pvr_set_dma_info(),
 *  * Any error returned by pvr_memory_context_init(), or
 *  * Any error returned by pvr_request_firmware().
 */
static int
pvr_device_gpu_init(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_load_gpu_id(pvr_dev);
	if (err)
		return err;

	/* Firmware cleanup is registered as a devm action, so no explicit
	 * release is needed on the error paths below.
	 */
	err = pvr_request_firmware(pvr_dev);
	if (err)
		return err;

	err = pvr_fw_validate_init_device_info(pvr_dev);
	if (err)
		return err;

	/* Pick the FW processor type from the feature set; exactly one of
	 * META, MIPS or RISC-V must be present.
	 */
	if (PVR_HAS_FEATURE(pvr_dev, meta))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
	else if (PVR_HAS_FEATURE(pvr_dev, mips))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
	else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
	else
		return -EINVAL;

	pvr_stream_create_musthave_masks(pvr_dev);

	err = pvr_set_dma_info(pvr_dev);
	if (err)
		return err;

	/* NOTE(review): MIPS FW appears not to need a kernel VM context —
	 * confirm against pvr_vm.c / pvr_fw.c.
	 */
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
		if (IS_ERR(pvr_dev->kernel_vm_ctx))
			return PTR_ERR(pvr_dev->kernel_vm_ctx);
	}

	err = pvr_fw_init(pvr_dev);
	if (err)
		goto err_vm_ctx_put;

	return 0;

err_vm_ctx_put:
	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
		pvr_dev->kernel_vm_ctx = NULL;
	}

	return err;
}
722 
/**
 * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Reverses pvr_device_gpu_init(): shuts down the firmware and, for non-MIPS
 * FW processors, drops the kernel VM context reference.
 */
static void
pvr_device_gpu_fini(struct pvr_device *pvr_dev)
{
	pvr_fw_fini(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		/* NOTE(review): assumes pvr_vm_context_put() returns true when
		 * the final reference is dropped — confirm against pvr_vm.c.
		 */
		WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
		pvr_dev->kernel_vm_ctx = NULL;
	}
}
737 
/**
 * pvr_device_init() - Initialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * If this function returns successfully, the device will have been fully
 * initialized. Otherwise, any parts of the device initialized before an error
 * occurs will be de-initialized before returning.
 *
 * NOTE: The initialization steps currently taken are the bare minimum required
 *       to read from the control registers. The device is unlikely to function
 *       until further initialization steps are added. [This note should be
 *       removed when that happens.]
 *
 * Return:
 *  * 0 on success,
 *  * Any error returned by pvr_device_reg_init(),
 *  * Any error returned by pvr_device_clk_init(), or
 *  * Any error returned by pvr_device_gpu_init().
 */
int
pvr_device_init(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct device *dev = drm_dev->dev;
	int err;

	/* Get the platform-specific data based on the compatible string. */
	pvr_dev->device_data = of_device_get_match_data(dev);

	/* Enable and initialize clocks required for the device to operate. */
	err = pvr_device_clk_init(pvr_dev);
	if (err)
		return err;

	/* Platform-specific power setup; ops come from the match data above. */
	err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
	if (err)
		return err;

	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		return err;

	/* Map the control registers into memory. */
	err = pvr_device_reg_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	/* Perform GPU-specific initialization steps. */
	err = pvr_device_gpu_init(pvr_dev);
	if (err)
		goto err_pm_runtime_put;

	err = pvr_device_irq_init(pvr_dev);
	if (err)
		goto err_device_gpu_fini;

	/* Init complete: let runtime PM power the GPU down again. */
	pm_runtime_put(dev);

	return 0;

err_device_gpu_fini:
	pvr_device_gpu_fini(pvr_dev);

err_pm_runtime_put:
	pm_runtime_put_sync_suspend(dev);

	return err;
}
807 
/**
 * pvr_device_fini() - Deinitialize a PowerVR device
 * @pvr_dev: Target PowerVR device.
 *
 * Counterpart to pvr_device_init(). Clocks and register mappings are devm
 * managed, so only the IRQ and GPU state need explicit teardown here.
 */
void
pvr_device_fini(struct pvr_device *pvr_dev)
{
	/*
	 * Deinitialization stages are performed in reverse order compared to
	 * the initialization stages in pvr_device_init().
	 */
	pvr_device_irq_fini(pvr_dev);
	pvr_device_gpu_fini(pvr_dev);
}
822 
823 bool
pvr_device_has_uapi_quirk(struct pvr_device * pvr_dev,u32 quirk)824 pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
825 {
826 	switch (quirk) {
827 	case 47217:
828 		return PVR_HAS_QUIRK(pvr_dev, 47217);
829 	case 48545:
830 		return PVR_HAS_QUIRK(pvr_dev, 48545);
831 	case 49927:
832 		return PVR_HAS_QUIRK(pvr_dev, 49927);
833 	case 51764:
834 		return PVR_HAS_QUIRK(pvr_dev, 51764);
835 	case 62269:
836 		return PVR_HAS_QUIRK(pvr_dev, 62269);
837 	default:
838 		return false;
839 	};
840 }
841 
842 bool
pvr_device_has_uapi_enhancement(struct pvr_device * pvr_dev,u32 enhancement)843 pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
844 {
845 	switch (enhancement) {
846 	case 35421:
847 		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
848 	case 42064:
849 		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
850 	default:
851 		return false;
852 	};
853 }
854 
/**
 * pvr_device_has_feature() - Look up device feature based on feature definition
 * @pvr_dev: Device pointer.
 * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
 *
 * Return:
 *  * %true if feature is present on device, or
 *  * %false if feature is not present on device.
 */
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
{
	switch (feature) {
	case PVR_FEATURE_CLUSTER_GROUPING:
		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);

	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);

	case PVR_FEATURE_FB_CDC_V4:
		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);

	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);

	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);

	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);

	case PVR_FEATURE_TESSELLATION:
		return PVR_HAS_FEATURE(pvr_dev, tessellation);

	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);

	case PVR_FEATURE_VDM_DRAWINDIRECT:
		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);

	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);

	case PVR_FEATURE_ZLS_SUBTILE:
		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);

	/* Derived features. */
	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
		u8 cdm_control_stream_format = 0;

		/* Return value intentionally ignored: if the feature value is
		 * absent, cdm_control_stream_format stays 0, which falls
		 * outside the accepted 2..4 range below.
		 */
		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
	}

	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
			u8 fbcdc_algorithm = 0;

			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
		}
		return false;

	default:
		WARN(true, "Looking up undefined feature %u\n", feature);
		return false;
	}
}
923