// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_sf.h"
#include "pvr_fw_trace.h"

#include <drm/drm_drv.h>
#include <drm/drm_file.h>

#include <linux/build_bug.h>
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>

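/*
 * Initialisation callback for the tracebuf control FW object, passed to
 * pvr_fw_object_create_and_map() below with the object's CPU mapping in
 * @cpu_ptr and the owning struct pvr_fw_trace in @priv.
 */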
static void
tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
	struct pvr_fw_trace *fw_trace = priv;
	u32 thread_nr;

	tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
	tracebuf_ctrl->tracebuf_flags = 0;

	if (fw_trace->group_mask)
		tracebuf_ctrl->log_type = fw_trace->group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
	else
		tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;

	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
					  &tracebuf_space->trace_buffer_fw_addr);

		tracebuf_space->trace_buffer = trace_buffer->buf;
		tracebuf_space->trace_pointer = 0;
	}
}

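/**
 * pvr_fw_trace_init() - Create the firmware trace buffers
 * @pvr_dev: Target PowerVR device.
 *
 * Allocates one uncached trace buffer per firmware thread plus the shared
 * tracebuf control structure, and links each buffer to its tracebuf_space
 * entry in the control structure.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map().
 */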
int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	u32 thread_nr;
	int err;

	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->buf =
			pvr_fw_object_create_and_map(pvr_dev,
						     ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
						     sizeof(*trace_buffer->buf),
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
						     PVR_BO_FW_NO_CLEAR_ON_RESET,
						     NULL, NULL, &trace_buffer->buf_obj);
		if (IS_ERR(trace_buffer->buf)) {
			drm_err(drm_dev, "Unable to allocate trace buffer\n");
			err = PTR_ERR(trace_buffer->buf);
			trace_buffer->buf = NULL;
			goto err_free_buf;
		}
	}

	/* TODO: Provide control of group mask. */
	fw_trace->group_mask = 0;

	fw_trace->tracebuf_ctrl =
		pvr_fw_object_create_and_map(pvr_dev,
					     sizeof(*fw_trace->tracebuf_ctrl),
					     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
					     PVR_BO_FW_NO_CLEAR_ON_RESET,
					     tracebuf_ctrl_init, fw_trace,
					     &fw_trace->tracebuf_ctrl_obj);
	if (IS_ERR(fw_trace->tracebuf_ctrl)) {
		drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
		err = PTR_ERR(fw_trace->tracebuf_ctrl);
		goto err_free_buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
		     ARRAY_SIZE(fw_trace->buffers));

	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct rogue_fwif_tracebuf_space *tracebuf_space =
			&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		trace_buffer->tracebuf_space = tracebuf_space;
	}

	return 0;

err_free_buf:
	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		if (trace_buffer->buf)
			pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
	}

	return err;
}

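/**
 * pvr_fw_trace_fini() - Release the firmware trace buffers
 * @pvr_dev: Target PowerVR device.
 *
 * Unmaps and destroys the per-thread trace buffers and the tracebuf control
 * structure created by pvr_fw_trace_init().
 */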
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	u32 thread_nr;

	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];

		pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
	}
	pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}

#if defined(CONFIG_DEBUG_FS)

/**
 * update_logtype() - Send KCCB command to trigger FW to update logtype
 * @pvr_dev: Target PowerVR device
 * @group_mask: New log group mask.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error returned by pvr_kccb_send_cmd(), or
 *  * -%EIO if the device is lost.
 */
static int
update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	struct rogue_fwif_kccb_cmd cmd;
	int idx;
	int err;

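	/*
	 * Write the new log type into the shared tracebuf control structure
	 * first; the LOGTYPE_UPDATE command sent below carries no payload, so
	 * the firmware presumably re-reads the value from there.
	 */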
	if (group_mask)
		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
	else
		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;

	fw_trace->group_mask = group_mask;

	down_read(&pvr_dev->reset_sem);
	if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
	cmd.kccb_flags = 0;

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);

	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}

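/**
 * struct pvr_fw_trace_seq_data - State for one open firmware trace debugfs file
 */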
struct pvr_fw_trace_seq_data {
	/** @buffer: Pointer to copy of trace data. */
	u32 *buffer;

	/** @start_offset: Starting offset in trace data, as reported by FW. */
	u32 start_offset;

	/** @idx: Current index into trace data. */
	u32 idx;

	/** @assert_buf: Trace assert buffer, as reported by FW. */
	struct rogue_fwif_file_info_buf assert_buf;
};

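/*
 * Map a firmware string format (SF) ID onto its index in the stid_fmts table.
 * Returns ROGUE_FW_SF_LAST if the ID is unknown.
 */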
static u32 find_sfid(u32 id)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
		if (stid_fmts[i].id == id)
			return i;
	}

	return ROGUE_FW_SF_LAST;
}

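/*
 * Read one dword from the trace snapshot, @offset dwords on from the current
 * entry. The snapshot is treated as a ring buffer: start_offset is the
 * firmware trace pointer captured at open time, so index 0 corresponds to the
 * oldest data. Reads beyond the end of the snapshot return 0.
 */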
static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
{
	u32 idx;

	idx = trace_seq_data->idx + offset;
	if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
		return 0;

	idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
	return trace_seq_data->buffer[idx];
}

/**
 * fw_trace_get_next() - Advance trace index to next entry
 * @trace_seq_data: Trace sequence data.
 *
 * Returns:
 *  * %true if trace index is now pointing to a valid entry, or
 *  * %false if trace index is pointing to an invalid entry, or has hit the end
 *    of the trace.
 */
static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
{
	u32 id, sf_id;

	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
		id = read_fw_trace(trace_seq_data, 0);
		trace_seq_data->idx++;
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;
		if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
			/* Assertion failure marks the end of the trace. */
			return false;
		}

		sf_id = find_sfid(id);
		if (sf_id == ROGUE_FW_SF_FIRST)
			continue;
		if (sf_id == ROGUE_FW_SF_LAST) {
			/*
			 * Could not match with an ID in the SF table, trace is
			 * most likely corrupt from this point.
			 */
			return false;
		}

		/* Skip over the timestamp, and any parameters. */
		trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);

		/* Ensure index is now pointing to a valid trace entry. */
		id = read_fw_trace(trace_seq_data, 0);
		if (!ROGUE_FW_LOG_VALIDID(id))
			continue;

		return true;
	}

	/* Hit end of trace data. */
	return false;
}

/**
 * fw_trace_get_first() - Find first valid entry in trace
 * @trace_seq_data: Trace sequence data.
 *
 * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
 *
 * If the trace has no valid entries, this function will exit with the trace
 * index pointing to the end of the trace. fw_trace_seq_show() will return an
 * error in this state.
 */
static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
{
	trace_seq_data->idx = 0;

	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
		u32 id = read_fw_trace(trace_seq_data, 0);

		if (ROGUE_FW_LOG_VALIDID(id)) {
			u32 sf_id = find_sfid(id);

			if (sf_id != ROGUE_FW_SF_FIRST)
				break;
		}
		trace_seq_data->idx++;
	}
}

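/*
 * seq_file iterator callbacks. The iterator position (*pos) counts decoded
 * trace entries rather than dword offsets, so start() rewinds to the first
 * valid entry and then steps forward *pos times.
 */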
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
	u32 i;

	/* Reset trace index, then advance to *pos. */
	fw_trace_get_first(trace_seq_data);

	for (i = 0; i < *pos; i++) {
		if (!fw_trace_get_next(trace_seq_data))
			return NULL;
	}

	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
}

static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;

	(*pos)++;
	if (!fw_trace_get_next(trace_seq_data))
		return NULL;

	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
}

static void fw_trace_seq_stop(struct seq_file *s, void *v)
{
}

static int fw_trace_seq_show(struct seq_file *s, void *v)
{
	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
	u64 timestamp;
	u32 id;
	u32 sf_id;

	if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
		return -EINVAL;

	id = read_fw_trace(trace_seq_data, 0);
	/* Index is not pointing at a valid entry. */
	if (!ROGUE_FW_LOG_VALIDID(id))
		return -EINVAL;

	sf_id = find_sfid(id);
	/* Index is not pointing at a valid entry. */
	if (sf_id == ROGUE_FW_SF_LAST)
		return -EINVAL;

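	/*
	 * Entry layout: dword 0 holds the log ID (matched against stid_fmts
	 * and encoding the parameter count), dwords 1 and 2 the 64-bit
	 * timestamp, followed by the entry's parameters.
	 */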
	timestamp = read_fw_trace(trace_seq_data, 1) |
		((u64)read_fw_trace(trace_seq_data, 2) << 32);
	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
		ROGUE_FWT_TIMESTAMP_TIME_SHIFT;

	seq_printf(s, "[%llu] : ", timestamp);
	if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
		seq_printf(s, "ASSERTION %s failed at %s:%u",
			   trace_seq_data->assert_buf.info,
			   trace_seq_data->assert_buf.path,
			   trace_seq_data->assert_buf.line_num);
	} else {
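		/*
		 * Pass the maximum possible number of parameters; the format
		 * string taken from stid_fmts determines how many are
		 * actually consumed.
		 */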
		seq_printf(s, stid_fmts[sf_id].name,
			   read_fw_trace(trace_seq_data, 3),
			   read_fw_trace(trace_seq_data, 4),
			   read_fw_trace(trace_seq_data, 5),
			   read_fw_trace(trace_seq_data, 6),
			   read_fw_trace(trace_seq_data, 7),
			   read_fw_trace(trace_seq_data, 8),
			   read_fw_trace(trace_seq_data, 9),
			   read_fw_trace(trace_seq_data, 10),
			   read_fw_trace(trace_seq_data, 11),
			   read_fw_trace(trace_seq_data, 12),
			   read_fw_trace(trace_seq_data, 13),
			   read_fw_trace(trace_seq_data, 14),
			   read_fw_trace(trace_seq_data, 15),
			   read_fw_trace(trace_seq_data, 16),
			   read_fw_trace(trace_seq_data, 17),
			   read_fw_trace(trace_seq_data, 18),
			   read_fw_trace(trace_seq_data, 19),
			   read_fw_trace(trace_seq_data, 20),
			   read_fw_trace(trace_seq_data, 21),
			   read_fw_trace(trace_seq_data, 22));
	}
	seq_puts(s, "\n");
	return 0;
}

static const struct seq_operations pvr_fw_trace_seq_ops = {
	.start = fw_trace_seq_start,
	.next = fw_trace_seq_next,
	.stop = fw_trace_seq_stop,
	.show = fw_trace_seq_show
};

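/*
 * inode->i_private holds the struct pvr_fw_trace_buffer registered by
 * pvr_fw_trace_debugfs_init(). Opening the file snapshots the firmware trace
 * buffer so the seq_file iteration operates on stable data.
 */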
static int fw_trace_open(struct inode *inode, struct file *file)
{
	struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
	struct rogue_fwif_tracebuf_space *tracebuf_space =
		trace_buffer->tracebuf_space;
	struct pvr_fw_trace_seq_data *trace_seq_data;
	int err;

	trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
	if (!trace_seq_data)
		return -ENOMEM;

	trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
					 sizeof(*trace_seq_data->buffer), GFP_KERNEL);
	if (!trace_seq_data->buffer) {
		err = -ENOMEM;
		goto err_free_data;
	}

	/*
	 * Take a local copy of the trace buffer, as firmware may still be
	 * writing to it. This will exist as long as this file is open.
	 */
	memcpy(trace_seq_data->buffer, trace_buffer->buf,
	       ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
	trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
	trace_seq_data->assert_buf = tracebuf_space->assert_buf;
	fw_trace_get_first(trace_seq_data);

	err = seq_open(file, &pvr_fw_trace_seq_ops);
	if (err)
		goto err_free_buffer;

	((struct seq_file *)file->private_data)->private = trace_seq_data;

	return 0;

err_free_buffer:
	kfree(trace_seq_data->buffer);

err_free_data:
	kfree(trace_seq_data);

	return err;
}

static int fw_trace_release(struct inode *inode, struct file *file)
{
	struct pvr_fw_trace_seq_data *trace_seq_data =
		((struct seq_file *)file->private_data)->private;

	seq_release(inode, file);
	kfree(trace_seq_data->buffer);
	kfree(trace_seq_data);

	return 0;
}

static const struct file_operations pvr_fw_trace_fops = {
	.owner = THIS_MODULE,
	.open = fw_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = fw_trace_release,
};

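/**
 * pvr_fw_trace_mask_update() - Handle a change to the trace group mask
 * @pvr_dev: Target PowerVR device.
 * @old_mask: Previous log group mask.
 * @new_mask: New log group mask.
 *
 * Sends a LOGTYPE_UPDATE command to the firmware if the mask has changed.
 */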
void
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
{
	if (old_mask != new_mask)
		update_logtype(pvr_dev, new_mask);
}

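/**
 * pvr_fw_trace_debugfs_init() - Create firmware trace debugfs entries
 * @pvr_dev: Target PowerVR device.
 * @dir: Parent debugfs directory.
 *
 * Creates one read-only "trace_<thread>" file per firmware thread under @dir.
 */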
void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
	u32 thread_nr;

	static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
		      "The filename buffer is only large enough for a single-digit thread count");

	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
		char filename[8];

		snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
		debugfs_create_file(filename, 0400, dir,
				    &fw_trace->buffers[thread_nr],
				    &pvr_fw_trace_fops);
	}
}
#endif