xref: /linux/drivers/gpu/drm/imagination/pvr_fw_trace.c (revision 6dfebeee296cbb3296f06c28f3b2d053ec8374e7)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_sf.h"
#include "pvr_fw_trace.h"

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include <linux/build_bug.h>
#include <linux/compiler_attributes.h>
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/limits.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include <linux/types.h>
21 
22 static int
23 validate_group_mask(struct pvr_device *pvr_dev, const u32 group_mask)
24 {
25 	if (group_mask & ~ROGUE_FWIF_LOG_TYPE_GROUP_MASK) {
26 		drm_warn(from_pvr_device(pvr_dev),
27 			 "Invalid fw_trace group mask 0x%08x (must be a subset of 0x%08x)",
28 			 group_mask, ROGUE_FWIF_LOG_TYPE_GROUP_MASK);
29 		return -EINVAL;
30 	}
31 
32 	return 0;
33 }
34 
35 static inline u32
36 build_log_type(const u32 group_mask)
37 {
38 	if (!group_mask)
39 		return ROGUE_FWIF_LOG_TYPE_NONE;
40 
41 	return group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
42 }
43 
44 /*
45  * Don't gate this behind CONFIG_DEBUG_FS so that it can be used as an initial
46  * value without further conditional code...
47  */
48 static u32 pvr_fw_trace_init_mask;
49 
50 /*
51  * ...but do only expose the module parameter if debugfs is enabled, since
52  * there's no reason to turn on fw_trace without it.
53  */
54 #if IS_ENABLED(CONFIG_DEBUG_FS)
55 static int
56 pvr_fw_trace_init_mask_set(const char *val, const struct kernel_param *kp)
57 {
58 	u32 mask = 0;
59 	int err;
60 
61 	err = kstrtouint(val, 0, &mask);
62 	if (err)
63 		return err;
64 
65 	err = validate_group_mask(NULL, mask);
66 	if (err)
67 		return err;
68 
69 	*(unsigned int *)kp->arg = mask;
70 
71 	return 0;
72 }
73 
74 const struct kernel_param_ops pvr_fw_trace_init_mask_ops = {
75 	.set = pvr_fw_trace_init_mask_set,
76 	.get = param_get_hexint,
77 };
78 
/* Type-check the backing variable against the "hexint" param ops. */
param_check_hexint(init_fw_trace_mask, &pvr_fw_trace_init_mask);
/* 0600: root may read back and change the mask via /sys/module/. */
module_param_cb(init_fw_trace_mask, &pvr_fw_trace_init_mask_ops, &pvr_fw_trace_init_mask, 0600);
__MODULE_PARM_TYPE(init_fw_trace_mask, "hexint");
MODULE_PARM_DESC(init_fw_trace_mask,
		 "Enable FW trace for the specified groups at device init time");
#endif
85 
/**
 * tracebuf_ctrl_init() - One-time init of the FW tracebuf control structure
 * @cpu_ptr: CPU mapping of the newly created FW object.
 * @priv: Opaque pointer; the owning &struct pvr_fw_trace.
 *
 * Passed as the init callback to pvr_fw_object_create_and_map() from
 * pvr_fw_trace_init(), so it runs while the object is being created. Sets
 * the buffer size and initial log_type, and points each per-thread
 * tracebuf space at its already-allocated trace buffer.
 */
static void
tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
	struct pvr_fw_trace *fw_trace = priv;

	tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
	tracebuf_ctrl->tracebuf_flags = 0;
	/* group_mask was seeded from the init_fw_trace_mask module param. */
	tracebuf_ctrl->log_type = build_log_type(fw_trace->group_mask);

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		/* Publish the FW-visible address of this thread's buffer. */
		pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
					  &tracebuf_space->trace_buffer_fw_addr);

		tracebuf_space->trace_buffer = trace_buffer->buf;
		tracebuf_space->trace_pointer = 0;
	}
}
108 
/**
 * pvr_fw_trace_init() - Allocate and initialise FW trace buffers
 * @pvr_dev: Target PowerVR device.
 *
 * Creates one uncached FW object per trace thread plus the shared tracebuf
 * control structure. All objects use PVR_BO_FW_NO_CLEAR_ON_RESET so trace
 * contents survive an FW reset. The control structure is populated by
 * tracebuf_ctrl_init() during creation.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map().
 */
int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	int err;

	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->buf =
			pvr_fw_object_create_and_map(pvr_dev,
						     ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
						     sizeof(*trace_buffer->buf),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
						     PVR_BO_FW_NO_CLEAR_ON_RESET,
						     NULL, NULL, &trace_buffer->buf_obj);
		if (IS_ERR(trace_buffer->buf)) {
			drm_err(drm_dev, "Unable to allocate trace buffer\n");
			err = PTR_ERR(trace_buffer->buf);
			/* NULL marks the slot unallocated for the cleanup loop. */
			trace_buffer->buf = NULL;
			goto err_free_buf;
		}
	}

	/*
	 * Load the initial group_mask from the init_fw_trace_mask module
	 * parameter. This allows early tracing before the user can write to
	 * debugfs. Unlike update_logtype(), we don't set log_type here as that
	 * is initialised by tracebuf_ctrl_init().
	 */
	fw_trace->group_mask = pvr_fw_trace_init_mask;

	fw_trace->tracebuf_ctrl =
		pvr_fw_object_create_and_map(pvr_dev,
					     sizeof(*fw_trace->tracebuf_ctrl),
					     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
					     PVR_BO_FW_NO_CLEAR_ON_RESET,
					     tracebuf_ctrl_init, fw_trace,
					     &fw_trace->tracebuf_ctrl_obj);
	if (IS_ERR(fw_trace->tracebuf_ctrl)) {
		drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
		err = PTR_ERR(fw_trace->tracebuf_ctrl);
		goto err_free_buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
		     ARRAY_SIZE(fw_trace->buffers));

	/* Cache per-thread tracebuf_space pointers for the debugfs readers. */
	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->tracebuf_space = tracebuf_space;
	}

	return 0;

err_free_buf:
	/* Destroy only the buffers that were successfully created. */
	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		if (trace_buffer->buf)
			pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
	}

	return err;
}
177 
178 void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
179 {
180 	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
181 
182 	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
183 		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
184 
185 		pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
186 	}
187 	pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
188 }
189 
/**
 * update_logtype() - Send KCCB command to trigger FW to update logtype
 * @pvr_dev: Target PowerVR device.
 * @group_mask: New log group mask; callers must have run it through
 *              validate_group_mask() first.
 *
 * The cached mask and the FW-shared log_type are written before the KCCB
 * command is issued.
 *
 * NOTE(review): the cached &pvr_fw_trace.group_mask is updated even when
 * pvr_kccb_send_cmd() fails, so a retry with the same mask will
 * short-circuit to 0 without re-sending the command — confirm this is
 * intended.
 *
 * Returns:
 *  * 0 on success, or if @group_mask already matches the current mask
 *    (in which case no command is sent),
 *  * -%EIO if the device is lost, or
 *  * Any error returned by pvr_kccb_send_cmd().
 */
static int
update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct rogue_fwif_kccb_cmd cmd;
	int idx;
	int err;

	/* No change in group_mask => nothing to update. */
	if (fw_trace->group_mask == group_mask)
		return 0;

	fw_trace->group_mask = group_mask;
	fw_trace->tracebuf_ctrl->log_type = build_log_type(group_mask);

	/* Hold off FW resets while the command is in flight. */
	down_read(&pvr_dev->reset_sem);
	if (!drm_dev_enter(drm_dev, &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
	cmd.kccb_flags = 0;

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);

	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}
236 
/**
 * struct pvr_fw_trace_seq_data - Per-open state for a fw trace debugfs file
 *
 * Holds a snapshot of one thread's trace buffer, taken at open time in
 * fw_trace_open(), so the seq_file iterator reads stable data while the FW
 * keeps writing to the live buffer.
 */
struct pvr_fw_trace_seq_data {
	/** @buffer: Pointer to copy of trace data. */
	u32 *buffer;

	/** @start_offset: Starting offset in trace data, as reported by FW. */
	u32 start_offset;

	/** @idx: Current index into trace data. */
	u32 idx;

	/** @assert_buf: Trace assert buffer, as reported by FW. */
	struct rogue_fwif_file_info_buf assert_buf;
};
250 
251 static u32 find_sfid(u32 id)
252 {
253 	for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
254 		if (stid_fmts[i].id == id)
255 			return i;
256 	}
257 
258 	return ROGUE_FW_SF_LAST;
259 }
260 
261 static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
262 {
263 	u32 idx;
264 
265 	idx = trace_seq_data->idx + offset;
266 	if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
267 		return 0;
268 
269 	idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
270 	return trace_seq_data->buffer[idx];
271 }
272 
/**
 * fw_trace_get_next() - Advance trace index to next entry
 * @trace_seq_data: Trace sequence data.
 *
 * Steps past the current entry's header, timestamp and parameters, skipping
 * invalid IDs and ROGUE_FW_SF_FIRST markers along the way.
 *
 * Returns:
 *  * %true if trace index is now pointing to a valid entry, or
 *  * %false if trace index is pointing to an invalid entry, or has hit the end
 *    of the trace.
 */
static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
{
	u32 id, sf_id;

	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
		id = read_fw_trace(trace_seq_data, 0);
		trace_seq_data->idx++;
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;
		if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
			/* Assertion failure marks the end of the trace. */
			return false;
		}

		sf_id = find_sfid(id);
		if (sf_id == ROGUE_FW_SF_FIRST)
			continue;
		if (sf_id == ROGUE_FW_SF_LAST) {
			/*
			 * Could not match with an ID in the SF table, trace is
			 * most likely corrupt from this point.
			 */
			return false;
		}

		/* Skip over the timestamp, and any parameters. */
		trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);

		/* Ensure index is now pointing to a valid trace entry. */
		id = read_fw_trace(trace_seq_data, 0);
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;

		return true;
	}

	/* Hit end of trace data. */
	return false;
}
321 
322 /**
323  * fw_trace_get_first() - Find first valid entry in trace
324  * @trace_seq_data: Trace sequence data.
325  *
326  * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
327  *
328  * If the trace has no valid entries, this function will exit with the trace
329  * index pointing to the end of the trace. trace_seq_show() will return an error
330  * in this state.
331  */
332 static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
333 {
334 	trace_seq_data->idx = 0;
335 
336 	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
337 		u32 id = read_fw_trace(trace_seq_data, 0);
338 
339 		if (ROGUE_FW_LOG_VALIDID(id)) {
340 			u32 sf_id = find_sfid(id);
341 
342 			if (sf_id != ROGUE_FW_SF_FIRST)
343 				break;
344 		}
345 		trace_seq_data->idx++;
346 	}
347 }
348 
349 static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
350 {
351 	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
352 
353 	/* Reset trace index, then advance to *pos. */
354 	fw_trace_get_first(trace_seq_data);
355 
356 	for (u32 i = 0; i < *pos; i++) {
357 		if (!fw_trace_get_next(trace_seq_data))
358 			return NULL;
359 	}
360 
361 	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
362 }
363 
364 static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
365 {
366 	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
367 
368 	(*pos)++;
369 	if (!fw_trace_get_next(trace_seq_data))
370 		return NULL;
371 
372 	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
373 }
374 
/* No per-iteration cleanup; the snapshot lives until fw_trace_release(). */
static void fw_trace_seq_stop(struct seq_file *s, void *v)
{
}
378 
/*
 * seq_file show callback: format the entry at the current index as one line
 * of "[timestamp] : message". Assertion-failure entries print the FW assert
 * buffer instead of a format-table message.
 *
 * Returns 0 on success, or -EINVAL if the index is past the end of the
 * snapshot or not pointing at a valid entry.
 */
static int fw_trace_seq_show(struct seq_file *s, void *v)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
	u64 timestamp;
	u32 id;
	u32 sf_id;

	if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
		return -EINVAL;

	id = read_fw_trace(trace_seq_data, 0);
	/* Index is not pointing at a valid entry. */
	if (!ROGUE_FW_LOG_VALIDID(id))
		return -EINVAL;

	sf_id = find_sfid(id);
	/* Index is not pointing at a valid entry. */
	if (sf_id == ROGUE_FW_SF_LAST)
		return -EINVAL;

	/* Timestamp is two dwords (high word first) after the entry ID. */
	timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
		read_fw_trace(trace_seq_data, 2);
	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
		ROGUE_FWT_TIMESTAMP_TIME_SHIFT;

	seq_printf(s, "[%llu] : ", timestamp);
	if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
		seq_printf(s, "ASSERTION %s failed at %s:%u",
			   trace_seq_data->assert_buf.info,
			   trace_seq_data->assert_buf.path,
			   trace_seq_data->assert_buf.line_num);
	} else {
		/*
		 * Always pass the maximum number of parameters; the format
		 * string in the SF table consumes only as many as it needs,
		 * and read_fw_trace() returns 0 for out-of-range offsets.
		 */
		seq_printf(s, stid_fmts[sf_id].name,
			   read_fw_trace(trace_seq_data, 3),
			   read_fw_trace(trace_seq_data, 4),
			   read_fw_trace(trace_seq_data, 5),
			   read_fw_trace(trace_seq_data, 6),
			   read_fw_trace(trace_seq_data, 7),
			   read_fw_trace(trace_seq_data, 8),
			   read_fw_trace(trace_seq_data, 9),
			   read_fw_trace(trace_seq_data, 10),
			   read_fw_trace(trace_seq_data, 11),
			   read_fw_trace(trace_seq_data, 12),
			   read_fw_trace(trace_seq_data, 13),
			   read_fw_trace(trace_seq_data, 14),
			   read_fw_trace(trace_seq_data, 15),
			   read_fw_trace(trace_seq_data, 16),
			   read_fw_trace(trace_seq_data, 17),
			   read_fw_trace(trace_seq_data, 18),
			   read_fw_trace(trace_seq_data, 19),
			   read_fw_trace(trace_seq_data, 20),
			   read_fw_trace(trace_seq_data, 21),
			   read_fw_trace(trace_seq_data, 22));
	}
	seq_puts(s, "\n");
	return 0;
}
436 
/* seq_file iterator over a snapshot taken in fw_trace_open(). */
static const struct seq_operations pvr_fw_trace_seq_ops = {
	.start = fw_trace_seq_start,
	.next = fw_trace_seq_next,
	.stop = fw_trace_seq_stop,
	.show = fw_trace_seq_show
};
443 
/*
 * Open handler for a per-thread trace_N debugfs file. Snapshots the live FW
 * trace buffer (plus its write pointer and assert buffer) into freshly
 * allocated memory, then hands the snapshot to the seq_file iterator.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or any error from
 * seq_open().
 */
static int fw_trace_open(struct inode *inode, struct file *file)
{
	struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
	struct rogue_fwif_tracebuf_space *tracebuf_space =
		trace_buffer->tracebuf_space;
	struct pvr_fw_trace_seq_data *trace_seq_data;
	int err;

	trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
	if (!trace_seq_data)
		return -ENOMEM;

	trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
					 sizeof(*trace_seq_data->buffer), GFP_KERNEL);
	if (!trace_seq_data->buffer) {
		err = -ENOMEM;
		goto err_free_data;
	}

	/*
	 * Take a local copy of the trace buffer, as firmware may still be
	 * writing to it. This will exist as long as this file is open.
	 */
	memcpy(trace_seq_data->buffer, trace_buffer->buf,
	       ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
	/* READ_ONCE: the FW updates trace_pointer concurrently. */
	trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
	trace_seq_data->assert_buf = tracebuf_space->assert_buf;
	fw_trace_get_first(trace_seq_data);

	err = seq_open(file, &pvr_fw_trace_seq_ops);
	if (err)
		goto err_free_buffer;

	((struct seq_file *)file->private_data)->private = trace_seq_data;

	return 0;

err_free_buffer:
	kfree(trace_seq_data->buffer);

err_free_data:
	kfree(trace_seq_data);

	return err;
}
489 
490 static int fw_trace_release(struct inode *inode, struct file *file)
491 {
492 	struct pvr_fw_trace_seq_data *trace_seq_data =
493 		((struct seq_file *)file->private_data)->private;
494 
495 	seq_release(inode, file);
496 	kfree(trace_seq_data->buffer);
497 	kfree(trace_seq_data);
498 
499 	return 0;
500 }
501 
/* fops for the per-thread trace_N debugfs files; reads go via seq_file. */
static const struct file_operations pvr_fw_trace_fops = {
	.owner = THIS_MODULE,
	.open = fw_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = fw_trace_release,
};
509 
510 static int pvr_fw_trace_mask_get(void *data, u64 *value)
511 {
512 	struct pvr_device *pvr_dev = data;
513 
514 	*value = pvr_dev->fw_dev.fw_trace.group_mask;
515 
516 	return 0;
517 }
518 
519 static int pvr_fw_trace_mask_set(void *data, u64 value)
520 {
521 	struct pvr_device *pvr_dev = data;
522 	const u32 group_mask = (u32)value;
523 	int err;
524 
525 	err = validate_group_mask(pvr_dev, group_mask);
526 	if (err)
527 		return err;
528 
529 	return update_logtype(pvr_dev, group_mask);
530 }
531 
/* Simple-attribute fops for trace_mask, printed/parsed as 32-bit hex. */
DEFINE_DEBUGFS_ATTRIBUTE(pvr_fw_trace_mask_fops, pvr_fw_trace_mask_get,
			 pvr_fw_trace_mask_set, "0x%08llx\n");
534 
535 void
536 pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
537 {
538 	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
539 
540 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
541 		return;
542 
543 	static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
544 		      "The filename buffer is only large enough for a single-digit thread count");
545 
546 	for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
547 		char filename[8];
548 
549 		snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
550 		debugfs_create_file(filename, 0400, dir,
551 				    &fw_trace->buffers[thread_nr],
552 				    &pvr_fw_trace_fops);
553 	}
554 
555 	debugfs_create_file("trace_mask", 0600, dir, fw_trace,
556 			    &pvr_fw_trace_mask_fops);
557 }
558