trace_mmiotrace.c: changes from commit 8dd06ef34b6e2f41b29fbf5fc1663780f2524285 (old) to 36590c50b2d0729952511129916beeea30d31d81 (new); removed lines are marked '-', added lines '+'.
// SPDX-License-Identifier: GPL-2.0
/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

--- 286 unchanged lines hidden ---

static void __trace_mmiotrace_rw(struct trace_array *tr,
				 struct trace_array_cpu *data,
				 struct mmiotrace_rw *rw)
{
	struct trace_event_call *call = &event_mmiotrace_rw;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;

+	trace_ctx = tracing_gen_ctx_flags(0);
	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->rw = *rw;

	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
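
The substance of the change is visible in this hunk: the old code passed a literal 0 for the trace flags plus a raw preempt_count(), while the new code asks tracing_gen_ctx_flags() for a single trace_ctx word that carries both pieces of context through reserve and commit. The following is a rough userspace sketch of the packing idea only; gen_ctx(), CTX_PC_MASK, and CTX_FLAGS_SHIFT are hypothetical names, and the bit layout is not the kernel's actual encoding (see tracing_gen_ctx_irq_test() in kernel/trace/trace.c for the real one).

/*
 * Illustrative sketch only: folds trace flags and a preempt-count
 * snapshot into one unsigned int. Field positions are hypothetical.
 */
#include <stdio.h>

#define CTX_PC_MASK	0xffu	/* low byte: preempt count snapshot */
#define CTX_FLAGS_SHIFT	16	/* upper bits: irq/NMI status flags */

static unsigned int gen_ctx(unsigned int trace_flags, unsigned int pc)
{
	return (trace_flags << CTX_FLAGS_SHIFT) | (pc & CTX_PC_MASK);
}

int main(void)
{
	/* One word now travels where two arguments used to. */
	unsigned int ctx = gen_ctx(0x2, 1);

	printf("flags=%#x pc=%u\n",
	       ctx >> CTX_FLAGS_SHIFT, ctx & CTX_PC_MASK);
	return 0;
}
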
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				  struct trace_array_cpu *data,
				  struct mmiotrace_map *map)
{
	struct trace_event_call *call = &event_mmiotrace_map;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
-	int pc = preempt_count();
+	unsigned int trace_ctx;

+	trace_ctx = tracing_gen_ctx_flags(0);
	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
-					  sizeof(*entry), 0, pc);
+					  sizeof(*entry), trace_ctx);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->map = *map;

	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
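
Both hunks leave the surrounding two-phase ring-buffer protocol untouched: reserve space, fill the entry in place, then commit, and if reservation fails, count a drop and return rather than block. Below is a minimal userspace sketch of that protocol; the mini_ring type and all names are hypothetical stand-ins for the kernel's ring buffer, not its implementation.

/*
 * Hypothetical miniature ring buffer illustrating the reserve/commit
 * flow of trace_buffer_lock_reserve()/trace_buffer_unlock_commit().
 */
#include <stdio.h>
#include <string.h>

static unsigned int dropped_count;	/* drops counted, never blocks */

struct mini_ring {
	char buf[16];
	size_t head;		/* committed data ends here */
	size_t reserved;	/* reservation in flight, if any */
};

/* Phase 1: reserve space; the caller may still discard before commit. */
static void *ring_reserve(struct mini_ring *r, size_t len)
{
	if (r->head + len > sizeof(r->buf)) {
		dropped_count++;	/* mirrors atomic_inc(&dropped_count) */
		return NULL;
	}
	r->reserved = len;
	return r->buf + r->head;
}

/* Phase 2: make the filled-in entry visible to readers. */
static void ring_commit(struct mini_ring *r)
{
	r->head += r->reserved;
	r->reserved = 0;
}

int main(void)
{
	struct mini_ring r = { .head = 0 };

	for (int i = 0; i < 5; i++) {
		char *slot = ring_reserve(&r, 5);

		if (!slot)
			continue;	/* full: drop recorded, move on */
		memcpy(slot, "event", 5);	/* fill the entry in place */
		ring_commit(&r);
	}
	printf("committed %zu bytes, dropped %u events\n",
	       r.head, dropped_count);
	return 0;
}
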
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = per_cpu_ptr(tr->array_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}
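
One asymmetry worth noting in the unchanged tail: mmio_trace_mapping() brackets its per-CPU lookup with preempt_disable()/preempt_enable() so that smp_processor_id() and the per_cpu_ptr() access refer to the same CPU throughout, while mmio_trace_rw() does not, presumably because its callers already run with preemption disabled. A kernel-style sketch of the same idiom follows; the mmio_hits counter and count_hit() helper are hypothetical, not part of mmiotrace.

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Hypothetical per-CPU counter used only for illustration. */
static DEFINE_PER_CPU(unsigned long, mmio_hits);

static void count_hit(void)
{
	/*
	 * Disable preemption so the task cannot migrate between
	 * reading the CPU id and touching that CPU's counter,
	 * mirroring the pattern in mmio_trace_mapping() above.
	 */
	preempt_disable();
	per_cpu(mmio_hits, smp_processor_id())++;
	preempt_enable();
}
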