// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_sf.h"
#include "pvr_fw_trace.h"

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include <linux/build_bug.h>
#include <linux/compiler_attributes.h>
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include <linux/types.h>

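/**
 * validate_group_mask() - Check that a log group mask only contains group bits
 * @pvr_dev: Target PowerVR device, or NULL if no device is available (e.g.
 *           when validating the module parameter).
 * @group_mask: Log group mask to validate.
 *
 * Returns:
 *  * 0 if @group_mask is a subset of %ROGUE_FWIF_LOG_TYPE_GROUP_MASK, or
 *  * -%EINVAL otherwise.
 */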
static int
validate_group_mask(struct pvr_device *pvr_dev, const u32 group_mask)
{
	if (group_mask & ~ROGUE_FWIF_LOG_TYPE_GROUP_MASK) {
		/* pvr_dev is NULL when called from the module parameter setter. */
		if (pvr_dev)
			drm_warn(from_pvr_device(pvr_dev),
				 "Invalid fw_trace group mask 0x%08x (must be a subset of 0x%08x)\n",
				 group_mask, ROGUE_FWIF_LOG_TYPE_GROUP_MASK);
		else
			pr_warn("Invalid fw_trace group mask 0x%08x (must be a subset of 0x%08x)\n",
				group_mask, ROGUE_FWIF_LOG_TYPE_GROUP_MASK);
		return -EINVAL;
	}

	return 0;
}

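/* Convert a log group mask into a ROGUE_FWIF_LOG_TYPE_* value for the FW. */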
static inline u32
build_log_type(const u32 group_mask)
{
	if (!group_mask)
		return ROGUE_FWIF_LOG_TYPE_NONE;

	return group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
}

/*
 * Don't gate this behind CONFIG_DEBUG_FS so that it can be used as an initial
 * value without further conditional code...
 */
static u32 pvr_fw_trace_init_mask;

/*
 * ...but do only expose the module parameter if debugfs is enabled, since
 * there's no reason to turn on fw_trace without it.
 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
static int
pvr_fw_trace_init_mask_set(const char *val, const struct kernel_param *kp)
{
	u32 mask = 0;
	int err;

	err = kstrtouint(val, 0, &mask);
	if (err)
		return err;

	err = validate_group_mask(NULL, mask);
	if (err)
		return err;

	*(unsigned int *)kp->arg = mask;

	return 0;
}

static const struct kernel_param_ops pvr_fw_trace_init_mask_ops = {
	.set = pvr_fw_trace_init_mask_set,
	.get = param_get_hexint,
};

param_check_hexint(init_fw_trace_mask, &pvr_fw_trace_init_mask);
module_param_cb(init_fw_trace_mask, &pvr_fw_trace_init_mask_ops, &pvr_fw_trace_init_mask, 0600);
__MODULE_PARM_TYPE(init_fw_trace_mask, "hexint");
MODULE_PARM_DESC(init_fw_trace_mask,
		 "Enable FW trace for the specified groups at device init time");
#endif

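/**
 * tracebuf_ctrl_init() - Initialise the FW tracebuf control structure
 * @cpu_ptr: CPU mapping of the tracebuf control FW object.
 * @priv: Pointer to the owning &struct pvr_fw_trace.
 *
 * Init callback passed to pvr_fw_object_create_and_map() in
 * pvr_fw_trace_init(); fills in the buffer size, log type and per-thread
 * trace buffer addresses.
 */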
static void
tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
	struct pvr_fw_trace *fw_trace = priv;

	tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
	tracebuf_ctrl->tracebuf_flags = 0;
	tracebuf_ctrl->log_type = build_log_type(fw_trace->group_mask);

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
					  &tracebuf_space->trace_buffer_fw_addr);

		tracebuf_space->trace_buffer = trace_buffer->buf;
		tracebuf_space->trace_pointer = 0;
	}
}

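/**
 * pvr_fw_trace_init() - Allocate and initialise the FW trace buffers
 * @pvr_dev: Target PowerVR device.
 *
 * Allocates one trace buffer per FW thread plus the shared tracebuf control
 * structure, and seeds the initial log group mask from the init_fw_trace_mask
 * module parameter.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map().
 */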
int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	int err;

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->buf =
			pvr_fw_object_create_and_map(pvr_dev,
						     ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
						     sizeof(*trace_buffer->buf),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
						     PVR_BO_FW_NO_CLEAR_ON_RESET,
						     NULL, NULL, &trace_buffer->buf_obj);
		if (IS_ERR(trace_buffer->buf)) {
			drm_err(drm_dev, "Unable to allocate trace buffer\n");
			err = PTR_ERR(trace_buffer->buf);
			trace_buffer->buf = NULL;
			goto err_free_buf;
		}
	}

	/*
	 * Load the initial group_mask from the init_fw_trace_mask module
	 * parameter. This allows early tracing before the user can write to
	 * debugfs. Unlike update_logtype(), we don't set log_type here as that
	 * is initialised by tracebuf_ctrl_init().
	 */
	fw_trace->group_mask = pvr_fw_trace_init_mask;

	fw_trace->tracebuf_ctrl =
		pvr_fw_object_create_and_map(pvr_dev,
					     sizeof(*fw_trace->tracebuf_ctrl),
					     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
					     PVR_BO_FW_NO_CLEAR_ON_RESET,
					     tracebuf_ctrl_init, fw_trace,
					     &fw_trace->tracebuf_ctrl_obj);
	if (IS_ERR(fw_trace->tracebuf_ctrl)) {
		drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
		err = PTR_ERR(fw_trace->tracebuf_ctrl);
		goto err_free_buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
		     ARRAY_SIZE(fw_trace->buffers));

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->tracebuf_space = tracebuf_space;
	}

	return 0;

err_free_buf:
	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		if (trace_buffer->buf)
			pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
	}

	return err;
}

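/**
 * pvr_fw_trace_fini() - Release the FW trace buffers
 * @pvr_dev: Target PowerVR device.
 *
 * Destroys everything created by pvr_fw_trace_init().
 */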
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
	}
	pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}

/**
 * update_logtype() - Send KCCB command to trigger FW to update logtype
 * @pvr_dev: Target PowerVR device.
 * @group_mask: New log group mask; must pass validate_group_mask().
 *
 * Returns:
 *  * 0 if @group_mask already matches the current value (no command is sent),
 *  * 0 on success,
 *  * -%EIO if the device is lost, or
 *  * Any error returned by pvr_kccb_send_cmd() or
 *    pvr_kccb_wait_for_completion().
 */
static int
update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct rogue_fwif_kccb_cmd cmd;
	int idx;
	int err;
	int slot;

	/* No change in group_mask => nothing to update. */
	if (fw_trace->group_mask == group_mask)
		return 0;

	fw_trace->group_mask = group_mask;
	fw_trace->tracebuf_ctrl->log_type = build_log_type(group_mask);

	down_read(&pvr_dev->reset_sem);
	if (!drm_dev_enter(drm_dev, &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
	cmd.kccb_flags = 0;

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
	if (err)
		goto err_drm_dev_exit;

	err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);

err_drm_dev_exit:
	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}

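/**
 * struct pvr_fw_trace_seq_data - State backing an open trace debugfs file
 *
 * Holds a snapshot of one FW trace buffer, taken in fw_trace_open(), so the
 * seq_file iterator can walk a stable copy while the firmware keeps writing
 * to the live buffer.
 */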
struct pvr_fw_trace_seq_data {
	/** @buffer: Pointer to copy of trace data. */
	u32 *buffer;

	/** @start_offset: Starting offset in trace data, as reported by FW. */
	u32 start_offset;

	/** @idx: Current index into trace data. */
	u32 idx;

	/** @assert_buf: Trace assert buffer, as reported by FW. */
	struct rogue_fwif_file_info_buf assert_buf;
};

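/* Map a FW trace entry ID onto its index in the stid_fmts format table. */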
static u32 find_sfid(u32 id)
{
	for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
		if (stid_fmts[i].id == id)
			return i;
	}

	return ROGUE_FW_SF_LAST;
}

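/*
 * Read one dword from the trace snapshot, offset dwords past the current
 * index. Reads beyond the end of the buffer return 0; accesses wrap around
 * the FW-reported start offset.
 */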
static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
{
	u32 idx;

	idx = trace_seq_data->idx + offset;
	if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
		return 0;

	idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
	return trace_seq_data->buffer[idx];
}

/**
 * fw_trace_get_next() - Advance trace index to next entry
 * @trace_seq_data: Trace sequence data.
 *
 * Returns:
 *  * %true if trace index is now pointing to a valid entry, or
 *  * %false if trace index is pointing to an invalid entry, or has hit the end
 *    of the trace.
 */
static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
{
	u32 id, sf_id;

	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
		id = read_fw_trace(trace_seq_data, 0);
		trace_seq_data->idx++;
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;
		if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
			/* Assertion failure marks the end of the trace. */
			return false;
		}

		sf_id = find_sfid(id);
		if (sf_id == ROGUE_FW_SF_FIRST)
			continue;
		if (sf_id == ROGUE_FW_SF_LAST) {
			/*
			 * Could not match with an ID in the SF table, trace is
			 * most likely corrupt from this point.
			 */
			return false;
		}

		/* Skip over the timestamp, and any parameters. */
		trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);

		/* Ensure index is now pointing to a valid trace entry. */
		id = read_fw_trace(trace_seq_data, 0);
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;

		return true;
	}

	/* Hit end of trace data. */
	return false;
}

/**
 * fw_trace_get_first() - Find first valid entry in trace
 * @trace_seq_data: Trace sequence data.
 *
 * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
 *
 * If the trace has no valid entries, this function will exit with the trace
 * index pointing to the end of the trace. fw_trace_seq_show() will return an
 * error in this state.
 */
static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
{
	trace_seq_data->idx = 0;

	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
		u32 id = read_fw_trace(trace_seq_data, 0);

		if (ROGUE_FW_LOG_VALIDID(id)) {
			u32 sf_id = find_sfid(id);

			if (sf_id != ROGUE_FW_SF_FIRST)
				break;
		}
		trace_seq_data->idx++;
	}
}

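/*
 * seq_file iterator callbacks for the trace_<N> debugfs files. Each step of
 * the iteration corresponds to one decoded entry in the trace snapshot taken
 * at open time.
 */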
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;

	/* Reset trace index, then advance to *pos. */
	fw_trace_get_first(trace_seq_data);

	for (u32 i = 0; i < *pos; i++) {
		if (!fw_trace_get_next(trace_seq_data))
			return NULL;
	}

	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
}

static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;

	(*pos)++;
	if (!fw_trace_get_next(trace_seq_data))
		return NULL;

	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
}

static void fw_trace_seq_stop(struct seq_file *s, void *v)
{
}

static int fw_trace_seq_show(struct seq_file *s, void *v)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
	u64 timestamp;
	u32 id;
	u32 sf_id;

	if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
		return -EINVAL;

	id = read_fw_trace(trace_seq_data, 0);
	/* Index is not pointing at a valid entry. */
	if (!ROGUE_FW_LOG_VALIDID(id))
		return -EINVAL;

	sf_id = find_sfid(id);
	/* Index is not pointing at a valid entry. */
	if (sf_id == ROGUE_FW_SF_LAST)
		return -EINVAL;

	timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
		read_fw_trace(trace_seq_data, 2);
	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
		ROGUE_FWT_TIMESTAMP_TIME_SHIFT;

	seq_printf(s, "[%llu] : ", timestamp);
	if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
		seq_printf(s, "ASSERTION %s failed at %s:%u",
			   trace_seq_data->assert_buf.info,
			   trace_seq_data->assert_buf.path,
			   trace_seq_data->assert_buf.line_num);
	} else {
		seq_printf(s, stid_fmts[sf_id].name,
			   read_fw_trace(trace_seq_data, 3),
			   read_fw_trace(trace_seq_data, 4),
			   read_fw_trace(trace_seq_data, 5),
			   read_fw_trace(trace_seq_data, 6),
			   read_fw_trace(trace_seq_data, 7),
			   read_fw_trace(trace_seq_data, 8),
			   read_fw_trace(trace_seq_data, 9),
			   read_fw_trace(trace_seq_data, 10),
			   read_fw_trace(trace_seq_data, 11),
			   read_fw_trace(trace_seq_data, 12),
			   read_fw_trace(trace_seq_data, 13),
			   read_fw_trace(trace_seq_data, 14),
			   read_fw_trace(trace_seq_data, 15),
			   read_fw_trace(trace_seq_data, 16),
			   read_fw_trace(trace_seq_data, 17),
			   read_fw_trace(trace_seq_data, 18),
			   read_fw_trace(trace_seq_data, 19),
			   read_fw_trace(trace_seq_data, 20),
			   read_fw_trace(trace_seq_data, 21),
			   read_fw_trace(trace_seq_data, 22));
	}
	seq_puts(s, "\n");
	return 0;
}

static const struct seq_operations pvr_fw_trace_seq_ops = {
	.start = fw_trace_seq_start,
	.next = fw_trace_seq_next,
	.stop = fw_trace_seq_stop,
	.show = fw_trace_seq_show
};

static int fw_trace_open(struct inode *inode, struct file *file)
{
	struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
	struct rogue_fwif_tracebuf_space *tracebuf_space =
		trace_buffer->tracebuf_space;
	struct pvr_fw_trace_seq_data *trace_seq_data;
	int err;

	trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
	if (!trace_seq_data)
		return -ENOMEM;

	trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
					 sizeof(*trace_seq_data->buffer), GFP_KERNEL);
	if (!trace_seq_data->buffer) {
		err = -ENOMEM;
		goto err_free_data;
	}

	/*
	 * Take a local copy of the trace buffer, as firmware may still be
	 * writing to it. This will exist as long as this file is open.
	 */
	memcpy(trace_seq_data->buffer, trace_buffer->buf,
	       ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
	trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
	trace_seq_data->assert_buf = tracebuf_space->assert_buf;
	fw_trace_get_first(trace_seq_data);

	err = seq_open(file, &pvr_fw_trace_seq_ops);
	if (err)
		goto err_free_buffer;

	((struct seq_file *)file->private_data)->private = trace_seq_data;

	return 0;

err_free_buffer:
	kfree(trace_seq_data->buffer);

err_free_data:
	kfree(trace_seq_data);

	return err;
}

static int fw_trace_release(struct inode *inode, struct file *file)
{
	struct pvr_fw_trace_seq_data *trace_seq_data =
		((struct seq_file *)file->private_data)->private;

	seq_release(inode, file);
	kfree(trace_seq_data->buffer);
	kfree(trace_seq_data);

	return 0;
}

static const struct file_operations pvr_fw_trace_fops = {
	.owner = THIS_MODULE,
	.open = fw_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = fw_trace_release,
};

static int pvr_fw_trace_mask_get(void *data, u64 *value)
{
	struct pvr_device *pvr_dev = data;

	*value = pvr_dev->fw_dev.fw_trace.group_mask;

	return 0;
}

static int pvr_fw_trace_mask_set(void *data, u64 value)
{
	struct pvr_device *pvr_dev = data;
	const u32 group_mask = (u32)value;
	int err;

	err = validate_group_mask(pvr_dev, group_mask);
	if (err)
		return err;

	return update_logtype(pvr_dev, group_mask);
}

DEFINE_DEBUGFS_ATTRIBUTE(pvr_fw_trace_mask_fops, pvr_fw_trace_mask_get,
			 pvr_fw_trace_mask_set, "0x%08llx\n");

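/**
 * pvr_fw_trace_debugfs_init() - Add FW trace entries to debugfs
 * @pvr_dev: Target PowerVR device.
 * @dir: Debugfs directory in which to create the entries.
 *
 * Creates one trace_<N> file per FW thread, plus a trace_mask file for
 * reading and updating the log group mask at runtime.
 */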
void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return;

	static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
		      "The filename buffer is only large enough for a single-digit thread count");

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		char filename[8];

		snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
		debugfs_create_file(filename, 0400, dir,
				    &fw_trace->buffers[thread_nr],
				    &pvr_fw_trace_fops);
	}

	/* pvr_fw_trace_mask_get()/set() expect the pvr_device as private data. */
	debugfs_create_file("trace_mask", 0600, dir, pvr_dev,
			    &pvr_fw_trace_mask_fops);
}