xref: /linux/kernel/trace/trace_snapshot.c (revision cb30bf881c5b4ee8b879558a2fce93d7de652955)
1bade44feSSteven Rostedt // SPDX-License-Identifier: GPL-2.0
2bade44feSSteven Rostedt #include <linux/fsnotify.h>
3bade44feSSteven Rostedt 
4bade44feSSteven Rostedt #include <asm/setup.h> /* COMMAND_LINE_SIZE */
5bade44feSSteven Rostedt 
6bade44feSSteven Rostedt #include "trace.h"
7bade44feSSteven Rostedt 
/* Used if snapshot allocated at boot */
static bool allocate_snapshot;
/* Set by "ftrace_boot_snapshot"; consumed outside this file — TODO confirm consumer */
static bool snapshot_at_boot;

/* Accumulates "alloc_snapshot=" arguments, tab separated (see boot_alloc_snapshot()) */
static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
/* Number of bytes already used in boot_snapshot_info */
static int boot_snapshot_index;
14bade44feSSteven Rostedt 
15bade44feSSteven Rostedt static int __init boot_alloc_snapshot(char *str)
16bade44feSSteven Rostedt {
17bade44feSSteven Rostedt 	char *slot = boot_snapshot_info + boot_snapshot_index;
18bade44feSSteven Rostedt 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
19bade44feSSteven Rostedt 	int ret;
20bade44feSSteven Rostedt 
21bade44feSSteven Rostedt 	if (str[0] == '=') {
22bade44feSSteven Rostedt 		str++;
23bade44feSSteven Rostedt 		if (strlen(str) >= left)
24bade44feSSteven Rostedt 			return -1;
25bade44feSSteven Rostedt 
26bade44feSSteven Rostedt 		ret = snprintf(slot, left, "%s\t", str);
27bade44feSSteven Rostedt 		boot_snapshot_index += ret;
28bade44feSSteven Rostedt 	} else {
29bade44feSSteven Rostedt 		allocate_snapshot = true;
30bade44feSSteven Rostedt 		/* We also need the main ring buffer expanded */
31bade44feSSteven Rostedt 		trace_set_ring_buffer_expanded(NULL);
32bade44feSSteven Rostedt 	}
33bade44feSSteven Rostedt 	return 1;
34bade44feSSteven Rostedt }
35bade44feSSteven Rostedt __setup("alloc_snapshot", boot_alloc_snapshot);
36bade44feSSteven Rostedt 
37bade44feSSteven Rostedt 
38bade44feSSteven Rostedt static int __init boot_snapshot(char *str)
39bade44feSSteven Rostedt {
40bade44feSSteven Rostedt 	snapshot_at_boot = true;
41bade44feSSteven Rostedt 	boot_alloc_snapshot(str);
42bade44feSSteven Rostedt 	return 1;
43bade44feSSteven Rostedt }
44bade44feSSteven Rostedt __setup("ftrace_boot_snapshot", boot_snapshot);
/*
 * Core snapshot implementation: validate that a swap is currently safe,
 * then swap the live buffer with the spare via update_max_tr().
 * Writes an explanatory message into the trace buffer when it refuses.
 */
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	unsigned long flags;

	/*
	 * update_max_tr() takes tr->max_lock; an NMI could have interrupted
	 * a holder of that lock, so refuse rather than risk a deadlock.
	 */
	if (in_nmi()) {
		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
		return;
	}

	/* No spare buffer to swap into: warn and stop tracing entirely. */
	if (!tr->allocated_snapshot) {
		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_array_puts(tr, "*** stopping trace here!   ***\n");
		tracer_tracing_off(tr);
		return;
	}

	/* A user-space mapped buffer can not be swapped out from under the mapping. */
	if (tr->mapped) {
		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer_uses_snapshot(tr->current_trace)) {
		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	/* update_max_tr() requires interrupts to be disabled. */
	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}
80bade44feSSteven Rostedt 
81bade44feSSteven Rostedt void tracing_snapshot_instance(struct trace_array *tr)
82bade44feSSteven Rostedt {
83bade44feSSteven Rostedt 	tracing_snapshot_instance_cond(tr, NULL);
84bade44feSSteven Rostedt }
85bade44feSSteven Rostedt 
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	/* All safety checks and the buffer swap happen in the instance helper. */
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
104bade44feSSteven Rostedt 
/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot.  This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	/* max_lock is an arch_spinlock_t: interrupts must be off while held. */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	/* cond_snapshot may be cleared concurrently; re-check under the lock. */
	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
135bade44feSSteven Rostedt 
136bade44feSSteven Rostedt /* resize @tr's buffer to the size of @size_tr's entries */
137bade44feSSteven Rostedt int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
138bade44feSSteven Rostedt 				 struct array_buffer *size_buf, int cpu_id)
139bade44feSSteven Rostedt {
140bade44feSSteven Rostedt 	int cpu, ret = 0;
141bade44feSSteven Rostedt 
142bade44feSSteven Rostedt 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
143bade44feSSteven Rostedt 		for_each_tracing_cpu(cpu) {
144bade44feSSteven Rostedt 			ret = ring_buffer_resize(trace_buf->buffer,
145bade44feSSteven Rostedt 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
146bade44feSSteven Rostedt 			if (ret < 0)
147bade44feSSteven Rostedt 				break;
148bade44feSSteven Rostedt 			per_cpu_ptr(trace_buf->data, cpu)->entries =
149bade44feSSteven Rostedt 				per_cpu_ptr(size_buf->data, cpu)->entries;
150bade44feSSteven Rostedt 		}
151bade44feSSteven Rostedt 	} else {
152bade44feSSteven Rostedt 		ret = ring_buffer_resize(trace_buf->buffer,
153bade44feSSteven Rostedt 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
154bade44feSSteven Rostedt 		if (ret == 0)
155bade44feSSteven Rostedt 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
156bade44feSSteven Rostedt 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
157bade44feSSteven Rostedt 	}
158bade44feSSteven Rostedt 
159bade44feSSteven Rostedt 	return ret;
160bade44feSSteven Rostedt }
161bade44feSSteven Rostedt 
162bade44feSSteven Rostedt int tracing_alloc_snapshot_instance(struct trace_array *tr)
163bade44feSSteven Rostedt {
164bade44feSSteven Rostedt 	int order;
165bade44feSSteven Rostedt 	int ret;
166bade44feSSteven Rostedt 
167bade44feSSteven Rostedt 	if (!tr->allocated_snapshot) {
168bade44feSSteven Rostedt 
169bade44feSSteven Rostedt 		/* Make the snapshot buffer have the same order as main buffer */
170bade44feSSteven Rostedt 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
171bade44feSSteven Rostedt 		ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
172bade44feSSteven Rostedt 		if (ret < 0)
173bade44feSSteven Rostedt 			return ret;
174bade44feSSteven Rostedt 
175bade44feSSteven Rostedt 		/* allocate spare buffer */
176bade44feSSteven Rostedt 		ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
177bade44feSSteven Rostedt 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
178bade44feSSteven Rostedt 		if (ret < 0)
179bade44feSSteven Rostedt 			return ret;
180bade44feSSteven Rostedt 
181bade44feSSteven Rostedt 		tr->allocated_snapshot = true;
182bade44feSSteven Rostedt 	}
183bade44feSSteven Rostedt 
184bade44feSSteven Rostedt 	return 0;
185bade44feSSteven Rostedt }
186bade44feSSteven Rostedt 
/* Release the memory of the snapshot buffer of @tr without destroying it. */
void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. instead, resize it because
	 * The max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want preserve it.
	 */
	ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, 0);	/* back to default order */
	ring_buffer_resize(tr->snapshot_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	trace_set_buffer_entries(&tr->snapshot_buffer, 1);
	tracing_reset_online_cpus(&tr->snapshot_buffer);
	tr->allocated_snapshot = false;
}
200bade44feSSteven Rostedt 
/*
 * Take a reference on the snapshot facility of @tr and make sure the
 * spare buffer is allocated. Caller must hold trace_types_lock.
 * Returns 0 on success, -EBUSY if the refcount would overflow or the
 * buffer is memory mapped, or the error from buffer allocation.
 */
int tracing_arm_snapshot_locked(struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&trace_types_lock);

	/* Mapped buffers can not be swapped; also guard against counter overflow. */
	spin_lock(&tr->snapshot_trigger_lock);
	if (tr->snapshot == UINT_MAX || tr->mapped) {
		spin_unlock(&tr->snapshot_trigger_lock);
		return -EBUSY;
	}

	tr->snapshot++;
	spin_unlock(&tr->snapshot_trigger_lock);

	/* Allocation may sleep; drop the reference again if it fails. */
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret) {
		spin_lock(&tr->snapshot_trigger_lock);
		tr->snapshot--;
		spin_unlock(&tr->snapshot_trigger_lock);
	}

	return ret;
}
225bade44feSSteven Rostedt 
226bade44feSSteven Rostedt int tracing_arm_snapshot(struct trace_array *tr)
227bade44feSSteven Rostedt {
228bade44feSSteven Rostedt 	guard(mutex)(&trace_types_lock);
229bade44feSSteven Rostedt 	return tracing_arm_snapshot_locked(tr);
230bade44feSSteven Rostedt }
231bade44feSSteven Rostedt 
/* Drop one snapshot reference on @tr; warn on an unbalanced disarm. */
void tracing_disarm_snapshot(struct trace_array *tr)
{
	spin_lock(&tr->snapshot_trigger_lock);
	if (!WARN_ON(!tr->snapshot))
		tr->snapshot--;
	spin_unlock(&tr->snapshot_trigger_lock);
}
239bade44feSSteven Rostedt 
240bade44feSSteven Rostedt /**
241bade44feSSteven Rostedt  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
242bade44feSSteven Rostedt  *
243bade44feSSteven Rostedt  * This is similar to tracing_snapshot(), but it will allocate the
244bade44feSSteven Rostedt  * snapshot buffer if it isn't already allocated. Use this only
245bade44feSSteven Rostedt  * where it is safe to sleep, as the allocation may sleep.
246bade44feSSteven Rostedt  *
247bade44feSSteven Rostedt  * This causes a swap between the snapshot buffer and the current live
248bade44feSSteven Rostedt  * tracing buffer. You can use this to take snapshots of the live
249bade44feSSteven Rostedt  * trace when some condition is triggered, but continue to trace.
250bade44feSSteven Rostedt  */
void tracing_snapshot_alloc(void)
{
	/* Allocation may sleep; silently give up if it fails. */
	if (tracing_alloc_snapshot() < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
262bade44feSSteven Rostedt 
263bade44feSSteven Rostedt /**
264bade44feSSteven Rostedt  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
265bade44feSSteven Rostedt  * @tr:		The tracing instance
266bade44feSSteven Rostedt  * @cond_data:	User data to associate with the snapshot
267bade44feSSteven Rostedt  * @update:	Implementation of the cond_snapshot update function
268bade44feSSteven Rostedt  *
269bade44feSSteven Rostedt  * Check whether the conditional snapshot for the given instance has
270bade44feSSteven Rostedt  * already been enabled, or if the current tracer is already using a
271bade44feSSteven Rostedt  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
272bade44feSSteven Rostedt  * save the cond_data and update function inside.
273bade44feSSteven Rostedt  *
274bade44feSSteven Rostedt  * Returns 0 if successful, error otherwise.
275bade44feSSteven Rostedt  */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	/* __free(kfree): automatically freed on every early-return path. */
	struct cond_snapshot *cond_snapshot __free(kfree) =
		kzalloc_obj(*cond_snapshot);
	int ret;

	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	guard(mutex)(&trace_types_lock);

	/* The snapshot buffer is unavailable while a tracer is using it. */
	if (tracer_uses_snapshot(tr->current_trace))
		return -EBUSY;

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot)
		return -EBUSY;

	ret = tracing_arm_snapshot_locked(tr);
	if (ret)
		return ret;

	/* Publish under max_lock; no_free_ptr() transfers ownership to @tr. */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = no_free_ptr(cond_snapshot);
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
318bade44feSSteven Rostedt 
319bade44feSSteven Rostedt /**
320bade44feSSteven Rostedt  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
321bade44feSSteven Rostedt  * @tr:		The tracing instance
322bade44feSSteven Rostedt  *
323bade44feSSteven Rostedt  * Check whether the conditional snapshot for the given instance is
324bade44feSSteven Rostedt  * enabled; if so, free the cond_snapshot associated with it,
325bade44feSSteven Rostedt  * otherwise return -EINVAL.
326bade44feSSteven Rostedt  *
327bade44feSSteven Rostedt  * Returns 0 if successful, error otherwise.
328bade44feSSteven Rostedt  */
329bade44feSSteven Rostedt int tracing_snapshot_cond_disable(struct trace_array *tr)
330bade44feSSteven Rostedt {
331bade44feSSteven Rostedt 	int ret = 0;
332bade44feSSteven Rostedt 
333bade44feSSteven Rostedt 	local_irq_disable();
334bade44feSSteven Rostedt 	arch_spin_lock(&tr->max_lock);
335bade44feSSteven Rostedt 
336bade44feSSteven Rostedt 	if (!tr->cond_snapshot)
337bade44feSSteven Rostedt 		ret = -EINVAL;
338bade44feSSteven Rostedt 	else {
339bade44feSSteven Rostedt 		kfree(tr->cond_snapshot);
340bade44feSSteven Rostedt 		tr->cond_snapshot = NULL;
341bade44feSSteven Rostedt 	}
342bade44feSSteven Rostedt 
343bade44feSSteven Rostedt 	arch_spin_unlock(&tr->max_lock);
344bade44feSSteven Rostedt 	local_irq_enable();
345bade44feSSteven Rostedt 
346bade44feSSteven Rostedt 	tracing_disarm_snapshot(tr);
347bade44feSSteven Rostedt 
348bade44feSSteven Rostedt 	return ret;
349bade44feSSteven Rostedt }
350bade44feSSteven Rostedt EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
351bade44feSSteven Rostedt 
352bade44feSSteven Rostedt #ifdef CONFIG_TRACER_MAX_TRACE
353bade44feSSteven Rostedt #ifdef LATENCY_FS_NOTIFY
354bade44feSSteven Rostedt static struct workqueue_struct *fsnotify_wq;
355bade44feSSteven Rostedt 
/* Process-context work item: emit a modify event on the tracing_max_latency file. */
static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}
362bade44feSSteven Rostedt 
/* irq_work callback: defer the fsnotify emission to the workqueue. */
static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}
369bade44feSSteven Rostedt 
370bade44feSSteven Rostedt __init static int latency_fsnotify_init(void)
371bade44feSSteven Rostedt {
372bade44feSSteven Rostedt 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
373bade44feSSteven Rostedt 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
374bade44feSSteven Rostedt 	if (!fsnotify_wq) {
375bade44feSSteven Rostedt 		pr_err("Unable to allocate tr_max_lat_wq\n");
376bade44feSSteven Rostedt 		return -ENOMEM;
377bade44feSSteven Rostedt 	}
378bade44feSSteven Rostedt 	return 0;
379bade44feSSteven Rostedt }
380bade44feSSteven Rostedt 
381bade44feSSteven Rostedt late_initcall_sync(latency_fsnotify_init);
382bade44feSSteven Rostedt 
/* Notify user-space watchers that tracing_max_latency changed. Safe from any context. */
void latency_fsnotify(struct trace_array *tr)
{
	/* Nothing to do until latency_fsnotify_init() has run. */
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}
394bade44feSSteven Rostedt #endif /* LATENCY_FS_NOTIFY */
395*8053f49fSSteven Rostedt 
396bade44feSSteven Rostedt static const struct file_operations tracing_max_lat_fops;
397bade44feSSteven Rostedt 
/* Create the "tracing_max_latency" file for @tr under @d_tracer. */
void trace_create_maxlat_file(struct trace_array *tr,
			      struct dentry *d_tracer)
{
#ifdef LATENCY_FS_NOTIFY
	/* Wire up the irq_work -> workqueue chain used by latency_fsnotify(). */
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
#endif
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, tr,
					      &tracing_max_lat_fops);
}
410bade44feSSteven Rostedt 
411bade44feSSteven Rostedt /*
412bade44feSSteven Rostedt  * Copy the new maximum trace into the separate maximum-trace
413bade44feSSteven Rostedt  * structure. (this way the maximum trace is permanently saved,
414bade44feSSteven Rostedt  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
415bade44feSSteven Rostedt  */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct array_buffer *max_buf = &tr->snapshot_buffer;
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	/* Record which CPU produced this maximum and when its window started. */
	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	/* Save the scheduling identity of the task that caused the latency. */
	strscpy(max_data->comm, tsk->comm);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
	/* Let user-space watchers of tracing_max_latency know it changed. */
	latency_fsnotify(tr);
}
450bade44feSSteven Rostedt #else
451bade44feSSteven Rostedt static inline void __update_max_tr(struct trace_array *tr,
452bade44feSSteven Rostedt 				   struct task_struct *tsk, int cpu) { }
453bade44feSSteven Rostedt #endif /* CONFIG_TRACER_MAX_TRACE */
454bade44feSSteven Rostedt 
455bade44feSSteven Rostedt /**
456bade44feSSteven Rostedt  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
457bade44feSSteven Rostedt  * @tr: tracer
458bade44feSSteven Rostedt  * @tsk: the task with the latency
459bade44feSSteven Rostedt  * @cpu: The cpu that initiated the trace.
460bade44feSSteven Rostedt  * @cond_data: User data associated with a conditional snapshot
461bade44feSSteven Rostedt  *
462bade44feSSteven Rostedt  * Flip the buffers between the @tr and the max_tr and record information
463bade44feSSteven Rostedt  * about which task was the cause of this latency.
464bade44feSSteven Rostedt  */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	/* Callers must disable interrupts (see tracing_snapshot_instance_cond()). */
	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->snapshot_buffer.buffer);
	else
		ring_buffer_record_off(tr->snapshot_buffer.buffer);

	/* Give a conditional snapshot's update() callback the chance to veto the swap. */
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
		arch_spin_unlock(&tr->max_lock);
		return;
	}

	/* The snapshot itself: exchange the live buffer with the spare. */
	swap(tr->array_buffer.buffer, tr->snapshot_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

	arch_spin_unlock(&tr->max_lock);

	/* Any waiters on the old snapshot buffer need to wake up */
	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
502bade44feSSteven Rostedt 
503bade44feSSteven Rostedt /**
504bade44feSSteven Rostedt  * update_max_tr_single - only copy one trace over, and reset the rest
505bade44feSSteven Rostedt  * @tr: tracer
506bade44feSSteven Rostedt  * @tsk: task with the latency
507bade44feSSteven Rostedt  * @cpu: the cpu of the buffer to copy.
508bade44feSSteven Rostedt  *
509bade44feSSteven Rostedt  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
510bade44feSSteven Rostedt  */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	/* Callers must disable interrupts. */
	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Swap only this CPU's buffer between the live and snapshot buffers. */
	ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 * Another reason is resize is in progress.
		 */
		trace_array_printk_buf(tr->snapshot_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit or resize in progress\n");
	}

	/* -EAGAIN and -EBUSY are expected transient failures; anything else is a bug. */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
547bade44feSSteven Rostedt 
/* Usage text shown in the top-level snapshot file. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_puts(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_puts(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_puts(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_puts(m, "#                       is not a '0' or '1')\n");
}
557bade44feSSteven Rostedt 
/* Usage text shown in a per-CPU snapshot file; varies with kernel config. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/* Per-CPU buffer swapping is only available with this config option. */
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}
572bade44feSSteven Rostedt 
573bade44feSSteven Rostedt void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
574bade44feSSteven Rostedt {
575bade44feSSteven Rostedt 	if (iter->tr->allocated_snapshot)
576bade44feSSteven Rostedt 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
577bade44feSSteven Rostedt 	else
578bade44feSSteven Rostedt 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
579bade44feSSteven Rostedt 
580bade44feSSteven Rostedt 	seq_puts(m, "# Snapshot commands:\n");
581bade44feSSteven Rostedt 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
582bade44feSSteven Rostedt 		show_snapshot_main_help(m);
583bade44feSSteven Rostedt 	else
584bade44feSSteven Rostedt 		show_snapshot_percpu_help(m);
585bade44feSSteven Rostedt }
586bade44feSSteven Rostedt 
/*
 * Open the "snapshot" tracefs file.
 *
 * Readers get a full trace_iterator from __tracing_open() (opened with the
 * snapshot flag set).  Writers only get a stub seq_file whose ->private
 * holds a minimal iterator carrying the trace_array, the snapshot buffer
 * and the target cpu for tracing_snapshot_write().
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret;

	/*
	 * Checks tracefs lockdown and takes a reference on @tr
	 * (dropped below via trace_array_put() on failure).
	 */
	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc_obj(*m);
		if (!m)
			goto out;
		iter = kzalloc_obj(*iter);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->array_buffer = &tr->snapshot_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
627bade44feSSteven Rostedt 
628bade44feSSteven Rostedt static void tracing_swap_cpu_buffer(void *tr)
629bade44feSSteven Rostedt {
630bade44feSSteven Rostedt 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
631bade44feSSteven Rostedt }
632bade44feSSteven Rostedt 
/*
 * Write handler for the "snapshot" tracefs file:
 *   0     - free the snapshot buffer (top level file only)
 *   1     - allocate the snapshot buffer if needed and take a snapshot
 *   other - clear the snapshot buffer without freeing it
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* Dropped automatically on every return path below */
	guard(mutex)(&trace_types_lock);

	/* The current tracer owns the snapshot buffer; don't race with it */
	if (tracer_uses_snapshot(tr->current_trace))
		return -EBUSY;

	/* tr->cond_snapshot is protected by max_lock, taken with irqs off */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();
	if (ret)
		return ret;

	switch (val) {
	case 0:
		/* Freeing is only allowed from the top level snapshot file */
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
			return -EINVAL;
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
			return -EINVAL;
#endif
		/*
		 * NOTE(review): a failure from resize_buffer_duplicate_size()
		 * is overwritten by the tracing_arm_snapshot_locked() result
		 * just below — confirm that is intentional.
		 */
		if (tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
					&tr->array_buffer, iter->cpu_file);

		ret = tracing_arm_snapshot_locked(tr);
		if (ret)
			return ret;

		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
			local_irq_disable();
			update_max_tr(tr, current, smp_processor_id(), NULL);
			local_irq_enable();
		} else {
			/* Per-cpu swap must run on the target CPU */
			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
						 (void *)tr, 1);
		}
		tracing_disarm_snapshot(tr);
		break;
	default:
		/* Any other value just clears the snapshot buffer contents */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->snapshot_buffer);
			else
				tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		/* Consume the whole write on success */
		*ppos += cnt;
		ret = cnt;
	}

	return ret;
}
714bade44feSSteven Rostedt 
715bade44feSSteven Rostedt static int tracing_snapshot_release(struct inode *inode, struct file *file)
716bade44feSSteven Rostedt {
717bade44feSSteven Rostedt 	struct seq_file *m = file->private_data;
718bade44feSSteven Rostedt 	int ret;
719bade44feSSteven Rostedt 
720bade44feSSteven Rostedt 	ret = tracing_release(inode, file);
721bade44feSSteven Rostedt 
722bade44feSSteven Rostedt 	if (file->f_mode & FMODE_READ)
723bade44feSSteven Rostedt 		return ret;
724bade44feSSteven Rostedt 
725bade44feSSteven Rostedt 	/* If write only, the seq_file is just a stub */
726bade44feSSteven Rostedt 	if (m)
727bade44feSSteven Rostedt 		kfree(m->private);
728bade44feSSteven Rostedt 	kfree(m);
729bade44feSSteven Rostedt 
730bade44feSSteven Rostedt 	return 0;
731bade44feSSteven Rostedt }
732bade44feSSteven Rostedt 
733bade44feSSteven Rostedt static int snapshot_raw_open(struct inode *inode, struct file *filp)
734bade44feSSteven Rostedt {
735bade44feSSteven Rostedt 	struct ftrace_buffer_info *info;
736bade44feSSteven Rostedt 	int ret;
737bade44feSSteven Rostedt 
738bade44feSSteven Rostedt 	/* The following checks for tracefs lockdown */
739bade44feSSteven Rostedt 	ret = tracing_buffers_open(inode, filp);
740bade44feSSteven Rostedt 	if (ret < 0)
741bade44feSSteven Rostedt 		return ret;
742bade44feSSteven Rostedt 
743bade44feSSteven Rostedt 	info = filp->private_data;
744bade44feSSteven Rostedt 
745bade44feSSteven Rostedt 	if (tracer_uses_snapshot(info->iter.trace)) {
746bade44feSSteven Rostedt 		tracing_buffers_release(inode, filp);
747bade44feSSteven Rostedt 		return -EBUSY;
748bade44feSSteven Rostedt 	}
749bade44feSSteven Rostedt 
750bade44feSSteven Rostedt 	info->iter.snapshot = true;
751bade44feSSteven Rostedt 	info->iter.array_buffer = &info->iter.tr->snapshot_buffer;
752bade44feSSteven Rostedt 
753bade44feSSteven Rostedt 	return ret;
754bade44feSSteven Rostedt }
755bade44feSSteven Rostedt 
/* File operations for the "snapshot" tracefs file */
const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};
763bade44feSSteven Rostedt 
/* File operations for the "snapshot_raw" tracefs file */
const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
};
770bade44feSSteven Rostedt 
771bade44feSSteven Rostedt #ifdef CONFIG_TRACER_MAX_TRACE
/* Read tr->max_latency as text, via tracing_nsecs_read(). */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;

	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
}
780bade44feSSteven Rostedt 
/* Update tr->max_latency from a text write, via tracing_nsecs_write(). */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;

	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
}
789bade44feSSteven Rostedt 
/* File operations for the "tracing_max_latency" tracefs file */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
797bade44feSSteven Rostedt #endif /* CONFIG_TRACER_MAX_TRACE */
798bade44feSSteven Rostedt 
/*
 * Account a new user-space mapping of the trace buffer.
 *
 * Mapping and snapshotting are mutually exclusive: return -EBUSY if a
 * snapshot is currently in use (or tr->mapped would overflow), otherwise
 * increment tr->mapped.
 */
int get_snapshot_map(struct trace_array *tr)
{
	int err = 0;

	/*
	 * Called with mmap_lock held. lockdep would be unhappy if we would now
	 * take trace_types_lock. Instead use the specific
	 * snapshot_trigger_lock.
	 */
	spin_lock(&tr->snapshot_trigger_lock);

	if (tr->snapshot || tr->mapped == UINT_MAX)
		err = -EBUSY;
	else
		tr->mapped++;

	spin_unlock(&tr->snapshot_trigger_lock);

	/* Wait for update_max_tr() to observe iter->tr->mapped */
	if (tr->mapped == 1)
		synchronize_rcu();

	return err;

}
824bade44feSSteven Rostedt 
/*
 * Drop one mapping reference taken by get_snapshot_map().
 * WARN (and leave the count alone) on underflow of tr->mapped.
 */
void put_snapshot_map(struct trace_array *tr)
{
	spin_lock(&tr->snapshot_trigger_lock);
	if (!WARN_ON(!tr->mapped))
		tr->mapped--;
	spin_unlock(&tr->snapshot_trigger_lock);
}
832bade44feSSteven Rostedt 
833bade44feSSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Probe callback for "func:snapshot": take a snapshot of @tr on every hit.
 * The signature is fixed by struct ftrace_probe_ops; ip/parent_ip/ops/data
 * are unused here.
 */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}
841bade44feSSteven Rostedt 
842bade44feSSteven Rostedt static void
843bade44feSSteven Rostedt ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
844bade44feSSteven Rostedt 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
845bade44feSSteven Rostedt 		      void *data)
846bade44feSSteven Rostedt {
847bade44feSSteven Rostedt 	struct ftrace_func_mapper *mapper = data;
848bade44feSSteven Rostedt 	long *count = NULL;
849bade44feSSteven Rostedt 
850bade44feSSteven Rostedt 	if (mapper)
851bade44feSSteven Rostedt 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
852bade44feSSteven Rostedt 
853bade44feSSteven Rostedt 	if (count) {
854bade44feSSteven Rostedt 
855bade44feSSteven Rostedt 		if (*count <= 0)
856bade44feSSteven Rostedt 			return;
857bade44feSSteven Rostedt 
858bade44feSSteven Rostedt 		(*count)--;
859bade44feSSteven Rostedt 	}
860bade44feSSteven Rostedt 
861bade44feSSteven Rostedt 	tracing_snapshot_instance(tr);
862bade44feSSteven Rostedt }
863bade44feSSteven Rostedt 
864bade44feSSteven Rostedt static int
865bade44feSSteven Rostedt ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
866bade44feSSteven Rostedt 		      struct ftrace_probe_ops *ops, void *data)
867bade44feSSteven Rostedt {
868bade44feSSteven Rostedt 	struct ftrace_func_mapper *mapper = data;
869bade44feSSteven Rostedt 	long *count = NULL;
870bade44feSSteven Rostedt 
871bade44feSSteven Rostedt 	seq_printf(m, "%ps:", (void *)ip);
872bade44feSSteven Rostedt 
873bade44feSSteven Rostedt 	seq_puts(m, "snapshot");
874bade44feSSteven Rostedt 
875bade44feSSteven Rostedt 	if (mapper)
876bade44feSSteven Rostedt 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
877bade44feSSteven Rostedt 
878bade44feSSteven Rostedt 	if (count)
879bade44feSSteven Rostedt 		seq_printf(m, ":count=%ld\n", *count);
880bade44feSSteven Rostedt 	else
881bade44feSSteven Rostedt 		seq_puts(m, ":unlimited\n");
882bade44feSSteven Rostedt 
883bade44feSSteven Rostedt 	return 0;
884bade44feSSteven Rostedt }
885bade44feSSteven Rostedt 
886bade44feSSteven Rostedt static int
887bade44feSSteven Rostedt ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
888bade44feSSteven Rostedt 		     unsigned long ip, void *init_data, void **data)
889bade44feSSteven Rostedt {
890bade44feSSteven Rostedt 	struct ftrace_func_mapper *mapper = *data;
891bade44feSSteven Rostedt 
892bade44feSSteven Rostedt 	if (!mapper) {
893bade44feSSteven Rostedt 		mapper = allocate_ftrace_func_mapper();
894bade44feSSteven Rostedt 		if (!mapper)
895bade44feSSteven Rostedt 			return -ENOMEM;
896bade44feSSteven Rostedt 		*data = mapper;
897bade44feSSteven Rostedt 	}
898bade44feSSteven Rostedt 
899bade44feSSteven Rostedt 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
900bade44feSSteven Rostedt }
901bade44feSSteven Rostedt 
902bade44feSSteven Rostedt static void
903bade44feSSteven Rostedt ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
904bade44feSSteven Rostedt 		     unsigned long ip, void *data)
905bade44feSSteven Rostedt {
906bade44feSSteven Rostedt 	struct ftrace_func_mapper *mapper = data;
907bade44feSSteven Rostedt 
908bade44feSSteven Rostedt 	if (!ip) {
909bade44feSSteven Rostedt 		if (!mapper)
910bade44feSSteven Rostedt 			return;
911bade44feSSteven Rostedt 		free_ftrace_func_mapper(mapper, NULL);
912bade44feSSteven Rostedt 		return;
913bade44feSSteven Rostedt 	}
914bade44feSSteven Rostedt 
915bade44feSSteven Rostedt 	ftrace_func_mapper_remove_ip(mapper, ip);
916bade44feSSteven Rostedt }
917bade44feSSteven Rostedt 
/* Probe ops for "func:snapshot" (no count limit, no mapper needed) */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
922bade44feSSteven Rostedt 
/* Probe ops for "func:snapshot:count" — needs the ip -> count mapper */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};
929bade44feSSteven Rostedt 
/*
 * Handle a "func:snapshot[:count]" command written to set_ftrace_filter.
 *
 * @glob:  function pattern; a leading '!' requests unregistration.
 * @param: optional count limiting how many snapshots are taken.
 *
 * The snapshot buffer is armed before the probe is registered and
 * disarmed again on unregistration or on registration failure, keeping
 * arm/disarm calls balanced.
 */
static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;

	if (glob[0] == '!') {
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
		if (!ret)
			tracing_disarm_snapshot(tr);

		return ret;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* An empty count (e.g. "func:snapshot:") means unlimited */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_arm_snapshot(tr);
	if (ret < 0)
		return ret;

	ret = register_ftrace_function_probe(glob, tr, ops, count);
	if (ret < 0)
		tracing_disarm_snapshot(tr);

	return ret < 0 ? ret : 0;
}
983bade44feSSteven Rostedt 
/* The "snapshot" command usable in set_ftrace_filter */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
988bade44feSSteven Rostedt 
/* Register the "snapshot" ftrace command at boot. */
__init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
993bade44feSSteven Rostedt #endif /* CONFIG_DYNAMIC_FTRACE */
994bade44feSSteven Rostedt 
/*
 * Allocate the snapshot buffer for @tr.  When a boot-time snapshot was
 * requested (allocate_snapshot set), size it to match the main buffer
 * (@size); otherwise allocate a minimal (size 1) placeholder buffer.
 */
int trace_allocate_snapshot(struct trace_array *tr, int size)
{
	int ret;

	/* Fixed mapped buffer trace arrays do not have snapshot buffers */
	if (tr->range_addr_start)
		return 0;

	/* allocate_snapshot can only be true during system boot */
	ret = allocate_trace_buffer(tr, &tr->snapshot_buffer,
				    allocate_snapshot ? size : 1);
	if (ret < 0)
		return -ENOMEM;

	tr->allocated_snapshot = allocate_snapshot;

	/* One-shot flag: it only applies to this allocation */
	allocate_snapshot = false;
	return 0;
}
1014bade44feSSteven Rostedt 
1015bade44feSSteven Rostedt __init static bool tr_needs_alloc_snapshot(const char *name)
1016bade44feSSteven Rostedt {
1017bade44feSSteven Rostedt 	char *test;
1018bade44feSSteven Rostedt 	int len = strlen(name);
1019bade44feSSteven Rostedt 	bool ret;
1020bade44feSSteven Rostedt 
1021bade44feSSteven Rostedt 	if (!boot_snapshot_index)
1022bade44feSSteven Rostedt 		return false;
1023bade44feSSteven Rostedt 
1024bade44feSSteven Rostedt 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
1025bade44feSSteven Rostedt 	    boot_snapshot_info[len] == '\t')
1026bade44feSSteven Rostedt 		return true;
1027bade44feSSteven Rostedt 
1028bade44feSSteven Rostedt 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
1029bade44feSSteven Rostedt 	if (!test)
1030bade44feSSteven Rostedt 		return false;
1031bade44feSSteven Rostedt 
1032bade44feSSteven Rostedt 	sprintf(test, "\t%s\t", name);
1033bade44feSSteven Rostedt 	ret = strstr(boot_snapshot_info, test) == NULL;
1034bade44feSSteven Rostedt 	kfree(test);
1035bade44feSSteven Rostedt 	return ret;
1036bade44feSSteven Rostedt }
1037bade44feSSteven Rostedt 
/*
 * If instance @name was listed in the "alloc_snapshot" boot parameter,
 * request a full-size snapshot buffer for the next trace array allocation.
 */
__init void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
1051bade44feSSteven Rostedt 
/*
 * If a boot snapshot was requested on the command line (snapshot_at_boot),
 * take a snapshot of every trace instance that has a snapshot buffer
 * allocated, and note it in each instance's trace.
 */
void __init ftrace_boot_snapshot(void)
{
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
}
1067