// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

static void record_blktrace_event(struct blk_io_trace *t, pid_t pid, int cpu,
				  sector_t sector, int bytes, u64 what,
				  dev_t dev, int error, u64 cgid,
				  ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	/*
	 * These two are not needed in ftrace as they are in the
	 * generic trace_entry, filled by tracing_generic_entry_update,
	 * but for the trace_event->bin() synthesizer benefit we do it
	 * here too.
	 */
	t->cpu = cpu;
	t->pid = pid;

	t->sector = sector;
	t->bytes = bytes;
	t->action = lower_32_bits(what);
	t->device = dev;
	t->error = error;
	t->pdu_len = pdu_len + cgid_len;

	if (cgid_len)
		memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
	if (pdu_len)
		memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
}

static void record_blktrace_event2(struct blk_io_trace2 *t2, pid_t pid, int cpu,
				   sector_t sector, int bytes, u64 what,
				   dev_t dev, int error, u64 cgid,
				   ssize_t cgid_len, void *pdu_data,
				   int pdu_len)
{
	t2->pid = pid;
	t2->cpu = cpu;

	t2->sector = sector;
	t2->bytes = bytes;
	t2->action = what;
	t2->device = dev;
	t2->error = error;
	t2->pdu_len = pdu_len + cgid_len;

	if (cgid_len)
		memcpy((void *)t2 + sizeof(*t2), &cgid, cgid_len);
	if (pdu_len)
		memcpy((void *)t2 + sizeof(*t2) + cgid_len, pdu_data, pdu_len);
}
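
/*
 * A sketch of the record layout both helpers above produce (not a
 * struct defined in this file): the fixed blk_io_trace/blk_io_trace2
 * header is followed first by the optional 8-byte cgid and then by
 * pdu_len bytes of pdu data.  Note that ->pdu_len stores
 * pdu_len + cgid_len, so readers must subtract the cgid size again to
 * get the real pdu length (see pdu_real_len() below).
 *
 *	+---------------------+------------------+--------------------+
 *	| blk_io_trace{,2}    | cgid (0/8 bytes) | pdu_data (pdu_len) |
 *	+---------------------+------------------+--------------------+
 */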

static void relay_blktrace_event1(struct blk_trace *bt, unsigned long sequence,
				 pid_t pid, int cpu, sector_t sector, int bytes,
				 u64 what, int error, u64 cgid,
				 ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	struct blk_io_trace *t;
	size_t trace_len = sizeof(*t) + pdu_len + cgid_len;

	t = relay_reserve(bt->rchan, trace_len);
	if (!t)
		return;

	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	t->sequence = sequence;
	t->time = ktime_to_ns(ktime_get());

	record_blktrace_event(t, pid, cpu, sector, bytes, what, bt->dev, error,
			      cgid, cgid_len, pdu_data, pdu_len);
}

static void relay_blktrace_event2(struct blk_trace *bt, unsigned long sequence,
				  pid_t pid, int cpu, sector_t sector,
				  int bytes, u64 what, int error, u64 cgid,
				  ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	struct blk_io_trace2 *t;
	size_t trace_len = sizeof(struct blk_io_trace2) + pdu_len + cgid_len;

	t = relay_reserve(bt->rchan, trace_len);
	if (!t)
		return;

	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE2_VERSION;
	t->sequence = sequence;
	t->time = ktime_to_ns(ktime_get());

	record_blktrace_event2(t, pid, cpu, sector, bytes, what, bt->dev, error,
			       cgid, cgid_len, pdu_data, pdu_len);
}

static void relay_blktrace_event(struct blk_trace *bt, unsigned long sequence,
				 pid_t pid, int cpu, sector_t sector, int bytes,
				 u64 what, int error, u64 cgid,
				 ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	if (bt->version == 2)
		return relay_blktrace_event2(bt, sequence, pid, cpu, sector,
					     bytes, what, error, cgid, cgid_len,
					     pdu_data, pdu_len);
	return relay_blktrace_event1(bt, sequence, pid, cpu, sector, bytes,
				     what, error, cgid, cgid_len, pdu_data,
				     pdu_len);
}
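
/*
 * Note the key v1/v2 difference in the helpers above: blk_io_trace
 * stores only a 32-bit action (hence lower_32_bits() in
 * record_blktrace_event()), while blk_io_trace2 keeps the full 64-bit
 * value.  This is why zone operations, whose category bits lie above
 * BLK_TC_END_V1, can only be emitted for version 2 (see
 * __blk_add_trace() below).
 */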

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, u64 action,
		       const void *data, size_t len, u64 cgid)
{
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	action = lower_32_bits(action | (cgid ? __BLK_TN_CGROUP : 0));
	if (blk_tracer) {
		struct blk_io_trace2 *t;
		size_t trace_len = sizeof(*t) + cgid_len + len;

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  trace_len, trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		record_blktrace_event2(t, pid, cpu, 0, 0,
				       action, bt->dev, 0, cgid, cgid_len,
				       (void *)data, len);
		trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
		return;
	}

	if (!bt->rchan)
		return;

	relay_blktrace_event(bt, 0, pid, cpu, 0, 0, action, 0, cgid,
			     cgid_len, (void *)data, len);
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

static int act_log_check(struct blk_trace *bt, u64 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
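
/*
 * Illustration of the filter semantics: if userspace set act_mask to
 * BLK_TC_READ and pid to 1234 at setup time, act_log_check() returns 1
 * ("skip this event") for everything except reads issued by pid 1234
 * whose sector falls inside [start_lba, end_lba].
 */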

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) <<	\
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
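
/*
 * Worked example: MASK_TC_BIT(opf, SYNC) isolates REQ_SYNC in @opf and
 * shifts it from bit __REQ_SYNC up to bit ilog2(BLK_TC_SYNC) +
 * BLK_TC_SHIFT, i.e. onto the BLK_TC_SYNC position within the action's
 * category bits.  The mask and the shift distance are both compile-time
 * constants, so each use boils down to a single AND and shift.
 */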

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u64 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;
	size_t trace_len;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);

	switch (op) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
		break;
	case REQ_OP_FLUSH:
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
		break;
	case REQ_OP_ZONE_APPEND:
		what |= BLK_TC_ACT(BLK_TC_ZONE_APPEND);
		break;
	case REQ_OP_ZONE_RESET:
		what |= BLK_TC_ACT(BLK_TC_ZONE_RESET);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		what |= BLK_TC_ACT(BLK_TC_ZONE_RESET_ALL);
		break;
	case REQ_OP_ZONE_FINISH:
		what |= BLK_TC_ACT(BLK_TC_ZONE_FINISH);
		break;
	case REQ_OP_ZONE_OPEN:
		what |= BLK_TC_ACT(BLK_TC_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		what |= BLK_TC_ACT(BLK_TC_ZONE_CLOSE);
		break;
	case REQ_OP_WRITE_ZEROES:
		what |= BLK_TC_ACT(BLK_TC_WRITE_ZEROES);
		break;
	default:
		break;
	}

	/* Drop trace events for zone operations with blktrace v1 */
	if (bt->version == 1 && (what >> BLK_TC_SHIFT) > BLK_TC_END_V1) {
		pr_debug_ratelimited("blktrace v1 cannot trace zone operation 0x%llx\n",
				(unsigned long long)what);
		return;
	}

	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		switch (bt->version) {
		case 1:
			trace_len = sizeof(struct blk_io_trace);
			break;
		case 2:
		default:
			/*
			 * ftrace always uses v2 (blk_io_trace2) format.
			 *
			 * For the sysfs-enabled tracing path (enabled via
			 * /sys/block/DEV/trace/enable), blk_trace_setup_queue()
			 * never initializes bt->version, leaving it 0 from
			 * kzalloc(). We must handle version == 0 safely here.
			 *
			 * Fall through to the default case to ensure we never
			 * hit the old bug where the default left trace_len = 0,
			 * causing a buffer underflow and memory corruption.
			 *
			 * Always use v2 format for ftrace and normalize
			 * bt->version to 2 when uninitialized.
			 */
			trace_len = sizeof(struct blk_io_trace2);
			if (bt->version == 0)
				bt->version = 2;
			break;
		}
		trace_len += pdu_len + cgid_len;
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  trace_len, trace_ctx);
		if (!event)
			return;

		tracing_record_cmdline(current);
		switch (bt->version) {
		case 1:
			record_blktrace_event(ring_buffer_event_data(event),
					      pid, cpu, sector, bytes,
					      what, bt->dev, error, cgid, cgid_len,
					      pdu_data, pdu_len);
			break;
		case 2:
		default:
			/*
			 * Use the v2 recording function
			 * (record_blktrace_event2), which writes the
			 * blk_io_trace2 structure with the correct field
			 * layout:
			 *   - 32-bit pid at offset 28
			 *   - 64-bit action at offset 32
			 *
			 * Falling through to the default case handles
			 * version == 0 (from the sysfs path), ensuring we
			 * always use the v2 recording function to match the
			 * v2 buffer allocated above.
			 */
			record_blktrace_event2(ring_buffer_event_data(event),
					       pid, cpu, sector, bytes,
					       what, bt->dev, error, cgid, cgid_len,
					       pdu_data, pdu_len);
			break;
		}

		trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
		return;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	sequence = per_cpu_ptr(bt->sequence, cpu);
	(*sequence)++;
	relay_blktrace_event(bt, *sequence, pid, cpu, sector, bytes,
			     what, error, cgid, cgid_len, pdu_data, pdu_len);
	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' were created
	 * under 'q->debugfs_dir', so look them up there and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	blk_debugfs_lock_nomemsave(q);
	ret = __blk_trace_remove(q);
	blk_debugfs_unlock_nomemrestore(q);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	size_t dropped = relay_stats(bt->rchan, RELAY_STATS_BUF_FULL);
	char buf[16];

	snprintf(buf, sizeof(buf), "%zu\n", dropped);

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static struct blk_trace *blk_trace_setup_prepare(struct request_queue *q,
						 char *name, dev_t dev,
						 u32 buf_size, u32 buf_nr,
						 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * bdev can be NULL, as with scsi-generic; here we are as helpful as
	 * we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n", name);
		return ERR_PTR(-EBUSY);
	}

	bt = kzalloc_obj(*bt);
	if (!bt)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing a whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partition block devices
	 * and scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface, the debugfs
	 * directory is required, contrary to the usual mantra of not checking
	 * for debugfs files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n", name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buf_size, buf_nr,
			       &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	blk_trace_setup_lba(bt, bdev);

	return bt;

err:
	blk_trace_free(q, bt);

	return ERR_PTR(ret);
}

static void blk_trace_setup_finalize(struct request_queue *q,
				     char *name, int version,
				     struct blk_trace *bt,
				     struct blk_user_trace_setup2 *buts)
{
	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE2);

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt->version = version;
	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct blk_user_trace_setup buts;
	struct blk_trace *bt;
	unsigned int memflags;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	if (!buts.buf_size || !buts.buf_nr)
		return -EINVAL;

	buts2 = (struct blk_user_trace_setup2) {
		.act_mask = buts.act_mask,
		.buf_size = buts.buf_size,
		.buf_nr = buts.buf_nr,
		.start_lba = buts.start_lba,
		.end_lba = buts.end_lba,
		.pid = buts.pid,
	};

	memflags = blk_debugfs_lock(q);
	bt = blk_trace_setup_prepare(q, name, dev, buts.buf_size, buts.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		blk_debugfs_unlock(q, memflags);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 1, bt, &buts2);
	strscpy(buts.name, buts2.name, BLKTRACE_BDEV_SIZE);
	blk_debugfs_unlock(q, memflags);

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
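
/*
 * For reference, a minimal sketch of the userspace side of the v1 setup
 * ioctl (hypothetical snippet, not part of the kernel; error handling
 * omitted):
 *
 *	struct blk_user_trace_setup buts = {
 *		.act_mask = 0,			// 0 means "trace everything"
 *		.buf_size = 512 * 1024,		// bytes per relay sub-buffer
 *		.buf_nr	  = 4,			// sub-buffers per CPU
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// kernel fills in buts.name
 *	ioctl(fd, BLKTRACESTART);
 *	// consume <debugfs>/block/<buts.name>/trace0..traceN-1 (one file
 *	// per CPU) from other threads
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */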

static int blk_trace_setup2(struct request_queue *q, char *name, dev_t dev,
			    struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct blk_trace *bt;
	unsigned int memflags;

	if (copy_from_user(&buts2, arg, sizeof(buts2)))
		return -EFAULT;

	if (!buts2.buf_size || !buts2.buf_nr)
		return -EINVAL;

	if (buts2.flags != 0)
		return -EINVAL;

	memflags = blk_debugfs_lock(q);
	bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		blk_debugfs_unlock(q, memflags);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 2, bt, &buts2);
	blk_debugfs_unlock(q, memflags);

	if (copy_to_user(arg, &buts2, sizeof(buts2))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct compat_blk_user_trace_setup cbuts;
	struct blk_trace *bt;
	unsigned int memflags;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	if (!cbuts.buf_size || !cbuts.buf_nr)
		return -EINVAL;

	buts2 = (struct blk_user_trace_setup2) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	memflags = blk_debugfs_lock(q);
	bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		blk_debugfs_unlock(q, memflags);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 1, bt, &buts2);
	blk_debugfs_unlock(q, memflags);

	if (copy_to_user(arg, &buts2.name, ARRAY_SIZE(buts2.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	blk_debugfs_lock_nomemsave(q);
	ret = __blk_trace_startstop(q, start);
	blk_debugfs_unlock_nomemrestore(q);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	switch (cmd) {
	case BLKTRACESETUP2:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup2(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u64 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_zone_update_request(void *ignore, struct request *rq)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt) || bt->version < 2) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ZONE_APPEND,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u64 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u64 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_zone_plug(void *ignore, struct request_queue *q,
				    unsigned int zno, sector_t sector,
				    unsigned int sectors)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt && bt->version >= 2)
		__blk_add_trace(bt, sector, sectors << SECTOR_SHIFT, 0,
				BLK_TA_ZONE_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_zone_unplug(void *ignore, struct request_queue *q,
				      unsigned int zno, sector_t sector,
				      unsigned int sectors)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt && bt->version >= 2)
		__blk_add_trace(bt, sector, sectors << SECTOR_SHIFT, 0,
				BLK_TA_ZONE_UNPLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_blk_zone_append_update_request_bio(
		blk_add_trace_zone_update_request, NULL);
	WARN_ON(ret);
	ret = register_trace_disk_zone_wplug_add_bio(blk_add_trace_zone_plug,
						     NULL);
	WARN_ON(ret);
	ret = register_trace_blk_zone_wplug_bio(blk_add_trace_zone_unplug,
						NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_blk_zone_wplug_bio(blk_add_trace_zone_unplug, NULL);
	unregister_trace_disk_zone_wplug_add_bio(blk_add_trace_zone_plug, NULL);
	unregister_trace_blk_zone_append_update_request_bio(
		blk_add_trace_zone_update_request, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace2 *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE_ZEROES) {
		rwbs[i++] = 'W';
		rwbs[i++] = 'Z';
	} else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
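
/*
 * Illustrative fill_rwbs() outputs: a plain read yields "R", a
 * read-ahead read "RA", a synchronous write "WS", a write with FUA
 * "WF", a discard "D", and a preflush+write+FUA request "FWF".
 * Zero-byte entries with no data direction come out as "N".
 */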

static inline
const struct blk_io_trace2 *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace2 *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
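
/*
 * The classic action line mirrors blkparse's default output, e.g.
 * (illustrative values):
 *
 *	  8,0    2     1.012345678  4123  Q  WS
 *
 * i.e. "major,minor cpu seconds.nanoseconds pid act rwbs", after which
 * the per-action blk_log_*() helper appends its payload, such as
 * "2048 + 8 [dd]" for blk_log_generic().
 */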

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last non-zero byte that needs to be printed */
1566 	for (end = pdu_len - 1; end >= 0; end--)
1567 		if (pdu_buf[end])
1568 			break;
1569 	end++;
1570 
1571 	trace_seq_putc(s, '(');
1572 
1573 	for (i = 0; i < pdu_len; i++) {
1574 
1575 		trace_seq_printf(s, "%s%02x",
1576 				 i == 0 ? "" : " ", pdu_buf[i]);
1577 
1578 		/*
1579 		 * stop when the rest is just zeros and indicate so
1580 		 * with a ".." appended
1581 		 */
1582 		if (i == end && end != pdu_len - 1) {
1583 			trace_seq_puts(s, " ..) ");
1584 			return;
1585 		}
1586 	}
1587 
1588 	trace_seq_puts(s, ") ");
1589 }
1590 
blk_log_generic(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1591 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1592 {
1593 	char cmd[TASK_COMM_LEN];
1594 
1595 	trace_find_cmdline(ent->pid, cmd);
1596 
1597 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1598 		trace_seq_printf(s, "%u ", t_bytes(ent));
1599 		blk_log_dump_pdu(s, ent, has_cg);
1600 		trace_seq_printf(s, "[%s]\n", cmd);
1601 	} else {
1602 		if (t_sec(ent))
1603 			trace_seq_printf(s, "%llu + %u [%s]\n",
1604 						t_sector(ent), t_sec(ent), cmd);
1605 		else
1606 			trace_seq_printf(s, "[%s]\n", cmd);
1607 	}
1608 }
1609 
blk_log_with_error(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1610 static void blk_log_with_error(struct trace_seq *s,
1611 			      const struct trace_entry *ent, bool has_cg)
1612 {
1613 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1614 		blk_log_dump_pdu(s, ent, has_cg);
1615 		trace_seq_printf(s, "[%d]\n", t_error(ent));
1616 	} else {
1617 		if (t_sec(ent))
1618 			trace_seq_printf(s, "%llu + %u [%d]\n",
1619 					 t_sector(ent),
1620 					 t_sec(ent), t_error(ent));
1621 		else
1622 			trace_seq_printf(s, "%llu [%d]\n",
1623 					 t_sector(ent), t_error(ent));
1624 	}
1625 }
1626 
blk_log_remap(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1627 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1628 {
1629 	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1630 
1631 	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1632 			 t_sector(ent), t_sec(ent),
1633 			 MAJOR(be32_to_cpu(__r->device_from)),
1634 			 MINOR(be32_to_cpu(__r->device_from)),
1635 			 be64_to_cpu(__r->sector_from));
1636 }
1637 
blk_log_plug(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1638 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1639 {
1640 	char cmd[TASK_COMM_LEN];
1641 
1642 	trace_find_cmdline(ent->pid, cmd);
1643 
1644 	trace_seq_printf(s, "[%s]\n", cmd);
1645 }
1646 
blk_log_unplug(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1647 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1648 {
1649 	char cmd[TASK_COMM_LEN];
1650 
1651 	trace_find_cmdline(ent->pid, cmd);
1652 
1653 	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1654 }
1655 
blk_log_split(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1656 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1657 {
1658 	char cmd[TASK_COMM_LEN];
1659 
1660 	trace_find_cmdline(ent->pid, cmd);
1661 
1662 	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1663 			 get_pdu_int(ent, has_cg), cmd);
1664 }
1665 
blk_log_msg(struct trace_seq * s,const struct trace_entry * ent,bool has_cg)1666 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1667 			bool has_cg)
1668 {
1669 
1670 	trace_seq_putmem(s, pdu_start(ent, has_cg),
1671 		pdu_real_len(ent, has_cg));
1672 	trace_seq_putc(s, '\n');
1673 }
1674 
1675 /*
1676  * struct tracer operations
1677  */
1678 
blk_tracer_print_header(struct seq_file * m)1679 static void blk_tracer_print_header(struct seq_file *m)
1680 {
1681 	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1682 		return;
1683 	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1684 		    "#  |     |     |           |   |   |\n");
1685 }
1686 
blk_tracer_start(struct trace_array * tr)1687 static void blk_tracer_start(struct trace_array *tr)
1688 {
1689 	blk_tracer_enabled = true;
1690 }
1691 
blk_tracer_init(struct trace_array * tr)1692 static int blk_tracer_init(struct trace_array *tr)
1693 {
1694 	blk_tr = tr;
1695 	blk_tracer_start(tr);
1696 	return 0;
1697 }
1698 
1699 static void blk_tracer_stop(struct trace_array *tr)
1700 {
1701 	blk_tracer_enabled = false;
1702 }
1703 
1704 static void blk_tracer_reset(struct trace_array *tr)
1705 {
1706 	blk_tracer_stop(tr);
1707 }
1708 
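/*
 * Dispatch table indexed by __BLK_TA_*: the short action name ("Q"), the
 * verbose name ("queue", used with the verbose trace option), and the
 * formatter for the remainder of the line.
 */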
1709 static const struct {
1710 	const char *act[2];
1711 	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1712 			    bool has_cg);
1713 } what2act[] = {
1714 	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1715 	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1716 	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1717 	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1718 	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1719 	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1720 	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1721 	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1722 	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1723 	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1724 	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1725 	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1726 	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1727 	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1728 };
1729 
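/*
 * Format a single event: notify messages have their payload emitted
 * verbatim, everything else is dispatched through what2act[] after
 * masking off the cgroup bit folded into ->action.
 */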
1730 static enum print_line_t print_one_line(struct trace_iterator *iter,
1731 					bool classic)
1732 {
1733 	struct trace_array *tr = iter->tr;
1734 	struct trace_seq *s = &iter->seq;
1735 	const struct blk_io_trace2 *t;
1736 	u16 what;
1737 	bool long_act;
1738 	blk_log_action_t *log_action;
1739 	bool has_cg;
1740 
1741 	t	   = te_blk_io_trace(iter->ent);
1742 	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1743 	long_act   = !!(tr->trace_flags & TRACE_ITER(VERBOSE));
1744 	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1745 	has_cg	   = t->action & __BLK_TA_CGROUP;
1746 
1747 	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1748 		log_action(iter, long_act ? "message" : "m", has_cg);
1749 		blk_log_msg(s, iter->ent, has_cg);
1750 		return trace_handle_return(s);
1751 	}
1752 
1753 	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1754 		trace_seq_printf(s, "Unknown action %x\n", what);
1755 	else {
1756 		log_action(iter, what2act[what].act[long_act], has_cg);
1757 		what2act[what].print(s, iter->ent, has_cg);
1758 	}
1759 
1760 	return trace_handle_return(s);
1761 }
1762 
1763 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1764 					       int flags, struct trace_event *event)
1765 {
1766 	return print_one_line(iter, false);
1767 }
1768 
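/*
 * Re-emit the event in the legacy struct blk_io_trace layout for the
 * binary interface: a freshly built v1 header up to ->sector, then the
 * tail copied straight from the v2 record, which relies on both structs
 * sharing the same layout from ->sector onwards.
 */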
1769 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1770 {
1771 	struct trace_seq *s = &iter->seq;
1772 	struct blk_io_trace2 *t = (struct blk_io_trace2 *)iter->ent;
1773 	const int offset = offsetof(struct blk_io_trace2, sector);
1774 	struct blk_io_trace old = {
1775 		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1776 		.time     = iter->ts,
1777 	};
1778 
1779 	trace_seq_putmem(s, &old, offset);
1780 	trace_seq_putmem(s, &t->sector,
1781 			 sizeof(old) - offset + t->pdu_len);
1782 }
1783 
1784 static enum print_line_t
1785 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1786 			     struct trace_event *event)
1787 {
1788 	blk_trace_synthesize_old_trace(iter);
1789 
1790 	return trace_handle_return(&iter->seq);
1791 }
1792 
1793 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1794 {
1795 	if ((iter->ent->type != TRACE_BLK) ||
1796 	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1797 		return TRACE_TYPE_UNHANDLED;
1798 
1799 	return print_one_line(iter, true);
1800 }
1801 
1802 static int
1803 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1804 {
1805 	/* don't output context-info for blk_classic output */
1806 	if (bit == TRACE_BLK_OPT_CLASSIC) {
1807 		if (set)
1808 			tr->trace_flags &= ~TRACE_ITER(CONTEXT_INFO);
1809 		else
1810 			tr->trace_flags |= TRACE_ITER(CONTEXT_INFO);
1811 	}
1812 	return 0;
1813 }
1814 
1815 static struct tracer blk_tracer __read_mostly = {
1816 	.name		= "blk",
1817 	.init		= blk_tracer_init,
1818 	.reset		= blk_tracer_reset,
1819 	.start		= blk_tracer_start,
1820 	.stop		= blk_tracer_stop,
1821 	.print_header	= blk_tracer_print_header,
1822 	.print_line	= blk_tracer_print_line,
1823 	.flags		= &blk_tracer_flags,
1824 	.set_flag	= blk_tracer_set_flag,
1825 };
1826 
1827 static struct trace_event_functions trace_blk_event_funcs = {
1828 	.trace		= blk_trace_event_print,
1829 	.binary		= blk_trace_event_print_binary,
1830 };
1831 
1832 static struct trace_event trace_blk_event = {
1833 	.type		= TRACE_BLK,
1834 	.funcs		= &trace_blk_event_funcs,
1835 };
1836 
1837 static struct work_struct blktrace_works __initdata;
1838 
1839 static int __init __init_blk_tracer(void)
1840 {
1841 	if (!register_trace_event(&trace_blk_event)) {
1842 		pr_warn("Warning: could not register block events\n");
1843 		return 1;
1844 	}
1845 
1846 	if (register_tracer(&blk_tracer) != 0) {
1847 		pr_warn("Warning: could not register the block tracer\n");
1848 		unregister_trace_event(&trace_blk_event);
1849 		return 1;
1850 	}
1851 
1852 	BUILD_BUG_ON(__alignof__(struct blk_user_trace_setup2) %
1853 		     __alignof__(long));
1854 	BUILD_BUG_ON(__alignof__(struct blk_io_trace2) % __alignof__(long));
1855 
1856 	return 0;
1857 }
1858 
1859 static void __init blktrace_works_func(struct work_struct *work)
1860 {
1861 	__init_blk_tracer();
1862 }
1863 
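/*
 * If the tracing core has set up its init workqueue, defer registration
 * there so it stays ordered with the rest of the tracer initialization;
 * otherwise register directly from this initcall.
 */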
1864 static int __init init_blk_tracer(void)
1865 {
1866 	int ret = 0;
1867 
1868 	if (trace_init_wq) {
1869 		INIT_WORK(&blktrace_works, blktrace_works_func);
1870 		queue_work(trace_init_wq, &blktrace_works);
1871 	} else {
1872 		ret = __init_blk_tracer();
1873 	}
1874 
1875 	return ret;
1876 }
1877 
1878 device_initcall(init_blk_tracer);
1879 
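/*
 * Tear down a trace created through sysfs.  The caller holds
 * q->debugfs_mutex; synchronize_rcu() separates clearing the pointer
 * from freeing it so lock-free readers cannot see freed memory.
 */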
1880 static int blk_trace_remove_queue(struct request_queue *q)
1881 {
1882 	struct blk_trace *bt;
1883 
1884 	bt = rcu_replace_pointer(q->blk_trace, NULL,
1885 				 lockdep_is_held(&q->debugfs_mutex));
1886 	if (bt == NULL)
1887 		return -EINVAL;
1888 
1889 	blk_trace_stop(bt);
1890 
1891 	put_probe_ref();
1892 	synchronize_rcu();
1893 	blk_trace_free(q, bt);
1894 	return 0;
1895 }
1896 
1897 /*
1898  * Setup everything required to start tracing
1899  */
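/*
 * Used by the sysfs interface: no relay channel is created, so output
 * flows through the "blk" ftrace tracer instead, and all action types
 * are enabled by default (act_mask = (u16)-1).
 */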
1900 static int blk_trace_setup_queue(struct request_queue *q,
1901 				 struct block_device *bdev)
1902 {
1903 	struct blk_trace *bt = NULL;
1904 	int ret = -ENOMEM;
1905 
1906 	bt = kzalloc_obj(*bt);
1907 	if (!bt)
1908 		return -ENOMEM;
1909 
1910 	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1911 	if (!bt->msg_data)
1912 		goto free_bt;
1913 
1914 	bt->dev = bdev->bd_dev;
1915 	bt->act_mask = (u16)-1;
1916 
1917 	blk_trace_setup_lba(bt, bdev);
1918 
1919 	rcu_assign_pointer(q->blk_trace, bt);
1920 	get_probe_ref();
1921 	return 0;
1922 
1923 free_bt:
1924 	blk_trace_free(q, bt);
1925 	return ret;
1926 }
1927 
1928 /*
1929  * sysfs interface to enable and configure tracing
1930  */
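/*
 * The attribute group below is named "trace", so for a disk these knobs
 * typically show up as, e.g.:
 *
 *	echo 1          > /sys/block/sda/trace/enable
 *	echo read,write > /sys/block/sda/trace/act_mask
 */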
1931 
1932 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1933 					 struct device_attribute *attr,
1934 					 char *buf);
1935 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1936 					  struct device_attribute *attr,
1937 					  const char *buf, size_t count);
1938 #define BLK_TRACE_DEVICE_ATTR(_name) \
1939 	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1940 		    sysfs_blk_trace_attr_show, \
1941 		    sysfs_blk_trace_attr_store)
1942 
1943 static BLK_TRACE_DEVICE_ATTR(enable);
1944 static BLK_TRACE_DEVICE_ATTR(act_mask);
1945 static BLK_TRACE_DEVICE_ATTR(pid);
1946 static BLK_TRACE_DEVICE_ATTR(start_lba);
1947 static BLK_TRACE_DEVICE_ATTR(end_lba);
1948 
1949 static struct attribute *blk_trace_attrs[] = {
1950 	&dev_attr_enable.attr,
1951 	&dev_attr_act_mask.attr,
1952 	&dev_attr_pid.attr,
1953 	&dev_attr_start_lba.attr,
1954 	&dev_attr_end_lba.attr,
1955 	NULL
1956 };
1957 
1958 struct attribute_group blk_trace_attr_group = {
1959 	.name  = "trace",
1960 	.attrs = blk_trace_attrs,
1961 };
1962 
1963 static const struct {
1964 	int mask;
1965 	const char *str;
1966 } mask_maps[] = {
1967 	{ BLK_TC_READ,		"read"		},
1968 	{ BLK_TC_WRITE,		"write"		},
1969 	{ BLK_TC_FLUSH,		"flush"		},
1970 	{ BLK_TC_SYNC,		"sync"		},
1971 	{ BLK_TC_QUEUE,		"queue"		},
1972 	{ BLK_TC_REQUEUE,	"requeue"	},
1973 	{ BLK_TC_ISSUE,		"issue"		},
1974 	{ BLK_TC_COMPLETE,	"complete"	},
1975 	{ BLK_TC_FS,		"fs"		},
1976 	{ BLK_TC_PC,		"pc"		},
1977 	{ BLK_TC_NOTIFY,	"notify"	},
1978 	{ BLK_TC_AHEAD,		"ahead"		},
1979 	{ BLK_TC_META,		"meta"		},
1980 	{ BLK_TC_DISCARD,	"discard"	},
1981 	{ BLK_TC_DRV_DATA,	"drv_data"	},
1982 	{ BLK_TC_FUA,		"fua"		},
1983 	{ BLK_TC_WRITE_ZEROES,	"write-zeroes"	},
1984 };
1985 
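/*
 * Parse a comma-separated list of category names from mask_maps[], e.g.
 * "read,write,sync" -> BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC.
 * Returns -EINVAL for an unknown token, -ENOMEM if the copy fails.
 */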
1986 static int blk_trace_str2mask(const char *str)
1987 {
1988 	int i;
1989 	int mask = 0;
1990 	char *buf, *s, *token;
1991 
1992 	buf = kstrdup(str, GFP_KERNEL);
1993 	if (buf == NULL)
1994 		return -ENOMEM;
1995 	s = strstrip(buf);
1996 
1997 	while (1) {
1998 		token = strsep(&s, ",");
1999 		if (token == NULL)
2000 			break;
2001 
2002 		if (*token == '\0')
2003 			continue;
2004 
2005 		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
2006 			if (strcasecmp(token, mask_maps[i].str) == 0) {
2007 				mask |= mask_maps[i].mask;
2008 				break;
2009 			}
2010 		}
2011 		if (i == ARRAY_SIZE(mask_maps)) {
2012 			mask = -EINVAL;
2013 			break;
2014 		}
2015 	}
2016 	kfree(buf);
2017 
2018 	return mask;
2019 }
2020 
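/*
 * The inverse of blk_trace_str2mask(): a mask of BLK_TC_READ | BLK_TC_SYNC
 * is rendered as "read,sync\n".  Returns the number of bytes written,
 * including the trailing newline.
 */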
2021 static ssize_t blk_trace_mask2str(char *buf, int mask)
2022 {
2023 	int i;
2024 	char *p = buf;
2025 
2026 	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
2027 		if (mask & mask_maps[i].mask) {
2028 			p += sprintf(p, "%s%s",
2029 				    (p == buf) ? "" : ",", mask_maps[i].str);
2030 		}
2031 	}
2032 	*p++ = '\n';
2033 
2034 	return p - buf;
2035 }
2036 
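/*
 * "enable" reports whether a trace is currently attached; the remaining
 * attributes read back the filter settings, or "disabled" when no trace
 * exists.
 */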
2037 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
2038 					 struct device_attribute *attr,
2039 					 char *buf)
2040 {
2041 	struct block_device *bdev = dev_to_bdev(dev);
2042 	struct request_queue *q = bdev_get_queue(bdev);
2043 	struct blk_trace *bt;
2044 	ssize_t ret = -ENXIO;
2045 
2046 	blk_debugfs_lock_nomemsave(q);
2047 
2048 	bt = rcu_dereference_protected(q->blk_trace,
2049 				       lockdep_is_held(&q->debugfs_mutex));
2050 	if (attr == &dev_attr_enable) {
2051 		ret = sprintf(buf, "%u\n", !!bt);
2052 		goto out_unlock_bdev;
2053 	}
2054 
2055 	if (bt == NULL)
2056 		ret = sprintf(buf, "disabled\n");
2057 	else if (attr == &dev_attr_act_mask)
2058 		ret = blk_trace_mask2str(buf, bt->act_mask);
2059 	else if (attr == &dev_attr_pid)
2060 		ret = sprintf(buf, "%u\n", bt->pid);
2061 	else if (attr == &dev_attr_start_lba)
2062 		ret = sprintf(buf, "%llu\n", bt->start_lba);
2063 	else if (attr == &dev_attr_end_lba)
2064 		ret = sprintf(buf, "%llu\n", bt->end_lba);
2065 
2066 out_unlock_bdev:
2067 	blk_debugfs_unlock_nomemrestore(q);
2068 	return ret;
2069 }
2070 
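/*
 * Writing "enable" attaches or removes the queue trace.  Writing any
 * other attribute sets up a trace first if none exists, then updates the
 * field; act_mask accepts either a number or a list of category names.
 */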
2071 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
2072 					  struct device_attribute *attr,
2073 					  const char *buf, size_t count)
2074 {
2075 	struct block_device *bdev = dev_to_bdev(dev);
2076 	struct request_queue *q = bdev_get_queue(bdev);
2077 	struct blk_trace *bt;
2078 	unsigned int memflags;
2079 	u64 value;
2080 	ssize_t ret = -EINVAL;
2081 
2082 	if (count == 0)
2083 		goto out;
2084 
2085 	if (attr == &dev_attr_act_mask) {
2086 		if (kstrtoull(buf, 0, &value)) {
2087 			/* Assume it is a list of trace category names */
2088 			ret = blk_trace_str2mask(buf);
2089 			if (ret < 0)
2090 				goto out;
2091 			value = ret;
2092 		}
2093 	} else {
2094 		if (kstrtoull(buf, 0, &value))
2095 			goto out;
2096 	}
2097 
2098 	memflags = blk_debugfs_lock(q);
2099 
2100 	bt = rcu_dereference_protected(q->blk_trace,
2101 				       lockdep_is_held(&q->debugfs_mutex));
2102 	if (attr == &dev_attr_enable) {
2103 		if (!!value == !!bt) {
2104 			ret = 0;
2105 			goto out_unlock_bdev;
2106 		}
2107 		if (value)
2108 			ret = blk_trace_setup_queue(q, bdev);
2109 		else
2110 			ret = blk_trace_remove_queue(q);
2111 		goto out_unlock_bdev;
2112 	}
2113 
2114 	ret = 0;
2115 	if (bt == NULL) {
2116 		ret = blk_trace_setup_queue(q, bdev);
2117 		bt = rcu_dereference_protected(q->blk_trace,
2118 				lockdep_is_held(&q->debugfs_mutex));
2119 	}
2120 
2121 	if (ret == 0) {
2122 		if (attr == &dev_attr_act_mask)
2123 			bt->act_mask = value;
2124 		else if (attr == &dev_attr_pid)
2125 			bt->pid = value;
2126 		else if (attr == &dev_attr_start_lba)
2127 			bt->start_lba = value;
2128 		else if (attr == &dev_attr_end_lba)
2129 			bt->end_lba = value;
2130 	}
2131 
2132 out_unlock_bdev:
2133 	blk_debugfs_unlock(q, memflags);
2134 out:
2135 	return ret ? ret : count;
2136 }
2137 #endif /* CONFIG_BLK_DEV_IO_TRACE */
2138 
2139 #ifdef CONFIG_EVENT_TRACING
2140 
2141 /**
2142  * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
2143  * @rwbs:	buffer to be filled
2144  * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
2145  *
2146  * Description:
2147  *     Maps each request operation and flag to a single character and fills the
2148  *     buffer provided by the caller with the resulting string.
2149  *
2150  **/
2151 void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
2152 {
2153 	int i = 0;
2154 
2155 	if (opf & REQ_PREFLUSH)
2156 		rwbs[i++] = 'F';
2157 
2158 	switch (opf & REQ_OP_MASK) {
2159 	case REQ_OP_WRITE:
2160 		rwbs[i++] = 'W';
2161 		break;
2162 	case REQ_OP_DISCARD:
2163 		rwbs[i++] = 'D';
2164 		break;
2165 	case REQ_OP_SECURE_ERASE:
2166 		rwbs[i++] = 'D';
2167 		rwbs[i++] = 'E';
2168 		break;
2169 	case REQ_OP_FLUSH:
2170 		rwbs[i++] = 'F';
2171 		break;
2172 	case REQ_OP_READ:
2173 		rwbs[i++] = 'R';
2174 		break;
2175 	case REQ_OP_ZONE_APPEND:
2176 		rwbs[i++] = 'Z';
2177 		rwbs[i++] = 'A';
2178 		break;
2179 	case REQ_OP_ZONE_RESET:
2180 	case REQ_OP_ZONE_RESET_ALL:
2181 		rwbs[i++] = 'Z';
2182 		rwbs[i++] = 'R';
2183 		if ((opf & REQ_OP_MASK) == REQ_OP_ZONE_RESET_ALL)
2184 			rwbs[i++] = 'A';
2185 		break;
2186 	case REQ_OP_ZONE_FINISH:
2187 		rwbs[i++] = 'Z';
2188 		rwbs[i++] = 'F';
2189 		break;
2190 	case REQ_OP_ZONE_OPEN:
2191 		rwbs[i++] = 'Z';
2192 		rwbs[i++] = 'O';
2193 		break;
2194 	case REQ_OP_ZONE_CLOSE:
2195 		rwbs[i++] = 'Z';
2196 		rwbs[i++] = 'C';
2197 		break;
2198 	case REQ_OP_WRITE_ZEROES:
2199 		rwbs[i++] = 'W';
2200 		rwbs[i++] = 'Z';
2201 		break;
2202 	default:
2203 		rwbs[i++] = 'N';
2204 	}
2205 
2206 	if (opf & REQ_FUA)
2207 		rwbs[i++] = 'F';
2208 	if (opf & REQ_RAHEAD)
2209 		rwbs[i++] = 'A';
2210 	if (opf & REQ_SYNC)
2211 		rwbs[i++] = 'S';
2212 	if (opf & REQ_META)
2213 		rwbs[i++] = 'M';
2214 	if (opf & REQ_ATOMIC)
2215 		rwbs[i++] = 'U';
2216 
2217 	WARN_ON_ONCE(i >= RWBS_LEN);
2218 
2219 	rwbs[i] = '\0';
2220 }
2221 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
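/*
 * Example: opf == (REQ_OP_WRITE | REQ_SYNC | REQ_FUA) fills rwbs with
 * "WFS": the operation letter(s) first, then one character per modifier
 * flag in the order tested above.
 */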
2222 
2223 #endif /* CONFIG_EVENT_TRACING */
2224 
2225