xref: /linux/kernel/trace/trace_mmiotrace.c (revision 6feb348783767e3f38d7612e6551ee8b580ac4e9)
/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>

#include "trace.h"

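/*
 * Usage sketch (not part of this file; paths depend on where debugfs is
 * mounted, so this is a hedged example of driving the tracer through the
 * usual ftrace control files):
 *
 *	mount -t debugfs debugfs /sys/kernel/debug
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 *	... load and exercise the device driver under test ...
 *	echo nop > /sys/kernel/debug/tracing/current_tracer
 *
 * The text format produced on trace_pipe is generated by the
 * mmio_print_*() helpers below.
 */
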
struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;

static void mmio_reset_data(struct trace_array *tr)
{
	int cpu;

	overrun_detected = false;
	prev_overruns = 0;
	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

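/*
 * Tracer life cycle hooks.  In this version of ftrace the core calls
 * init/reset when the tracer is selected or deselected via current_tracer,
 * and ctrl_update when tracing is switched on or off (tr->ctrl mirrors
 * that switch), so the mmiotrace page-fault hooks are armed only while
 * the buffer is actually being filled.  (Descriptive note; see
 * kernel/trace/trace.c of the same era for the exact call sites.)
 */
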
static void mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;
	if (tr->ctrl) {
		mmio_reset_data(tr);
		enable_mmiotrace();
	}
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	if (tr->ctrl)
		disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_ctrl_update(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	if (tr->ctrl) {
		mmio_reset_data(tr);
		enable_mmiotrace();
	} else {
		disable_mmiotrace();
	}
}

static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}

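/*
 * Example of the header line emitted above (values are illustrative and
 * derived only from the format strings):
 *
 *	PCIDEV 0010 10de0191 10 fd000000 ... 1000000 ... nvidia
 *
 * i.e. bus/devfn, vendor/device id, irq, seven BAR start addresses (with
 * the resource flag bits or'ed in), seven BAR sizes, and the name of the
 * bound driver, if any.
 */
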
static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = 0;
	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

	if (over > prev_overruns)
		cnt = over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

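/*
 * Note: ring_buffer_overruns() reports a cumulative count for the whole
 * buffer, so count_overruns() keeps the last seen total in prev_overruns
 * and returns only the delta, i.e. the number of events lost since the
 * previous read of the pipe.
 */
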
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

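/*
 * When events have been dropped, the reader sees a line such as
 *
 *	MARK 0.000000 Lost 1234 events.
 *
 * injected ahead of the remaining trace data (the count is illustrative).
 * Otherwise each call emits one PCIDEV header line until the PCI device
 * list has been walked, after which mmio_read() returns 0 and the generic
 * pipe code continues with the per-event output from mmio_print_line()
 * below.
 */
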
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %lu.%06lu %d 0x%llx %02lx,%02lx,%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_printf(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

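/*
 * Example lines produced above (all values are illustrative, taken only
 * from the format strings): "R" and "W" records carry access width,
 * timestamp, map id, physical address, value, caller PC and a trailing 0:
 *
 *	W 4 0.001230 1 0xfd000000 0xdeadbeef 0xffffffffa0012345 0
 *	R 4 0.001240 1 0xfd000000 0x00000001 0xffffffffa0012360 0
 */
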
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %lu.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_printf(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

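/*
 * Example lines (illustrative values): a traced ioremap() shows up as a
 * MAP record with map id, physical base, virtual base and length; the
 * matching iounmap() as an UNMAP record with the same map id:
 *
 *	MAP 0.000100 1 0xfd000000 0xffffc90000100000 0x4000 0x0 0
 *	UNMAP 2.345678 1 0x0 0
 */
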
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg		= print->buf;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (entry->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);

	return TRACE_TYPE_HANDLED;
}

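/*
 * Example (illustrative): a user-supplied marker string "hello\n" ends up
 * in the dump as
 *
 *	MARK 1.234567 hello
 *
 * The newline terminating the line comes from the message itself, as the
 * comment above points out.
 */
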
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.ctrl_update	= mmio_trace_ctrl_update,
	.print_line	= mmio_print_line,
};

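/*
 * Registering the struct tracer above makes "mmiotrace" appear in
 * available_tracers and selectable through current_tracer; the
 * .print_line hook means entry formatting bypasses the default ftrace
 * output and uses the text format shown in the examples above.
 */
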
__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

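/*
 * Event injection side.  Both helpers below follow the same pattern:
 * reserve space for an event in the ring buffer, fill in the generic
 * entry header plus the type-specific payload, commit the event, and
 * finally wake up any readers blocked on the pipe via trace_wake_up().
 */
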
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;

	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type			= TRACE_MMIO_RW;
	entry->rw			= *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;

	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type			= TRACE_MMIO_MAP;
	entry->map			= *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}
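
/*
 * Hedged sketch (not part of this file): a printf-style wrapper that a
 * caller in the mmiotrace core could layer on top of mmio_trace_printk()
 * to emit MARK lines.  The name mmiotrace_mark() is hypothetical; note
 * that the format string must end in '\n', as required by
 * mmio_print_mark() above.
 *
 * static int mmiotrace_mark(const char *fmt, ...)
 * {
 *	va_list args;
 *	int ret;
 *
 *	va_start(args, fmt);
 *	ret = mmio_trace_printk(fmt, args);
 *	va_end(args);
 *	return ret;
 * }
 */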