xref: /linux/kernel/trace/blktrace.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
4  *
5  */
6 #include <linux/kernel.h>
7 #include <linux/blkdev.h>
8 #include <linux/blktrace_api.h>
9 #include <linux/percpu.h>
10 #include <linux/init.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/debugfs.h>
14 #include <linux/export.h>
15 #include <linux/time.h>
16 #include <linux/uaccess.h>
17 #include <linux/list.h>
18 #include <linux/blk-cgroup.h>
19 
20 #include "../../block/blk.h"
21 
22 #include <trace/events/block.h>
23 
24 #include "trace_output.h"
25 
26 #ifdef CONFIG_BLK_DEV_IO_TRACE
27 
28 static unsigned int blktrace_seq __read_mostly = 1;
29 
30 static struct trace_array *blk_tr;
31 static bool blk_tracer_enabled __read_mostly;
32 
33 static LIST_HEAD(running_trace_list);
34 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
35 
36 /* Select an alternative, minimalistic output instead of the original one */
37 #define TRACE_BLK_OPT_CLASSIC	0x1
38 #define TRACE_BLK_OPT_CGROUP	0x2
39 #define TRACE_BLK_OPT_CGNAME	0x4
40 
41 static struct tracer_opt blk_tracer_opts[] = {
42 	/* Disable the minimalistic output by default */
43 	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
44 #ifdef CONFIG_BLK_CGROUP
45 	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
46 	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
47 #endif
48 	{ }
49 };
50 
51 static struct tracer_flags blk_tracer_flags = {
52 	.val  = 0,
53 	.opts = blk_tracer_opts,
54 };
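/*
 * Illustrative note (not part of the original file): when the "blk"
 * tracer is in use, these flags surface as tracefs options, roughly:
 *
 *   # echo blk > /sys/kernel/debug/tracing/current_tracer
 *   # echo 1 > /sys/kernel/debug/tracing/options/blk_classic
 *
 * The tracefs/debugfs mount point shown here is an assumption.
 */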
55 
56 /* Global reference count of probes */
57 static DEFINE_MUTEX(blk_probe_mutex);
58 static int blk_probes_ref;
59 
60 static void blk_register_tracepoints(void);
61 static void blk_unregister_tracepoints(void);
62 
63 /*
64  * Send out a notify message.
65  */
66 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
67 		       const void *data, size_t len,
68 		       union kernfs_node_id *cgid)
69 {
70 	struct blk_io_trace *t;
71 	struct ring_buffer_event *event = NULL;
72 	struct ring_buffer *buffer = NULL;
73 	int pc = 0;
74 	int cpu = smp_processor_id();
75 	bool blk_tracer = blk_tracer_enabled;
76 	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
77 
78 	if (blk_tracer) {
79 		buffer = blk_tr->trace_buffer.buffer;
80 		pc = preempt_count();
81 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
82 						  sizeof(*t) + len + cgid_len,
83 						  0, pc);
84 		if (!event)
85 			return;
86 		t = ring_buffer_event_data(event);
87 		goto record_it;
88 	}
89 
90 	if (!bt->rchan)
91 		return;
92 
93 	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
94 	if (t) {
95 		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
96 		t->time = ktime_to_ns(ktime_get());
97 record_it:
98 		t->device = bt->dev;
99 		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
100 		t->pid = pid;
101 		t->cpu = cpu;
102 		t->pdu_len = len + cgid_len;
103 		if (cgid)
104 			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
105 		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
106 
107 		if (blk_tracer)
108 			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
109 	}
110 }
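/*
 * Layout sketch of the record emitted above (derived from the memcpy
 * offsets; illustrative only):
 *
 *   +----------------------+-----------------+------------------+
 *   | struct blk_io_trace  | cgid (optional) | data (len bytes) |
 *   +----------------------+-----------------+------------------+
 *
 * t->pdu_len = len + cgid_len, so a parser locates the payload by
 * skipping sizeof(*t) plus cgid_len whenever __BLK_TN_CGROUP is set
 * in t->action.
 */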
111 
112 /*
113  * Send out a notify for this process, if we haven't done so since a trace
114  * started
115  */
116 static void trace_note_tsk(struct task_struct *tsk)
117 {
118 	unsigned long flags;
119 	struct blk_trace *bt;
120 
121 	tsk->btrace_seq = blktrace_seq;
122 	spin_lock_irqsave(&running_trace_lock, flags);
123 	list_for_each_entry(bt, &running_trace_list, running_list) {
124 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
125 			   sizeof(tsk->comm), NULL);
126 	}
127 	spin_unlock_irqrestore(&running_trace_lock, flags);
128 }
129 
130 static void trace_note_time(struct blk_trace *bt)
131 {
132 	struct timespec64 now;
133 	unsigned long flags;
134 	u32 words[2];
135 
136 	/* need to check user space to see if this breaks in y2038 or y2106 */
137 	ktime_get_real_ts64(&now);
138 	words[0] = (u32)now.tv_sec;
139 	words[1] = now.tv_nsec;
140 
141 	local_irq_save(flags);
142 	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
143 	local_irq_restore(flags);
144 }
145 
146 void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
147 	const char *fmt, ...)
148 {
149 	int n;
150 	va_list args;
151 	unsigned long flags;
152 	char *buf;
153 
154 	if (unlikely(bt->trace_state != Blktrace_running &&
155 		     !blk_tracer_enabled))
156 		return;
157 
158 	/*
159 	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
160 	 * message to the trace.
161 	 */
162 	if (!(bt->act_mask & BLK_TC_NOTIFY))
163 		return;
164 
165 	local_irq_save(flags);
166 	buf = this_cpu_ptr(bt->msg_data);
167 	va_start(args, fmt);
168 	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
169 	va_end(args);
170 
171 	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
172 		blkcg = NULL;
173 #ifdef CONFIG_BLK_CGROUP
174 	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
175 		blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
176 #else
177 	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
178 #endif
179 	local_irq_restore(flags);
180 }
181 EXPORT_SYMBOL_GPL(__trace_note_message);
182 
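/*
 * Return 1 if the event should be filtered out (its action is not in
 * the configured mask, the sector falls outside [start_lba, end_lba],
 * or the pid does not match), 0 if it should be logged.
 */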
183 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
184 			 pid_t pid)
185 {
186 	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
187 		return 1;
188 	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
189 		return 1;
190 	if (bt->pid && pid != bt->pid)
191 		return 1;
192 
193 	return 0;
194 }
195 
196 /*
197  * Data direction bit lookup
198  */
199 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
200 				 BLK_TC_ACT(BLK_TC_WRITE) };
201 
202 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
203 #define BLK_TC_PREFLUSH		BLK_TC_FLUSH
204 
205 /* The ilog2() calls fall out because they're constant */
206 #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
207 	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
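/*
 * Worked example (illustrative): for SYNC, the macro shifts the
 * REQ_SYNC bit in @rw so it lands on BLK_TC_SYNC within the action's
 * high 16 bits:
 *
 *   MASK_TC_BIT(op_flags, SYNC)
 *     == (op_flags & REQ_SYNC)
 *        << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * i.e. a set REQ_SYNC bit becomes BLK_TC_ACT(BLK_TC_SYNC), and a
 * clear bit contributes nothing.
 */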
208 
209 /*
210  * The worker for the various blk_add_trace*() types. Fills out a
211  * blk_io_trace structure and places it in a per-cpu subbuffer.
212  */
213 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
214 		     int op, int op_flags, u32 what, int error, int pdu_len,
215 		     void *pdu_data, union kernfs_node_id *cgid)
216 {
217 	struct task_struct *tsk = current;
218 	struct ring_buffer_event *event = NULL;
219 	struct ring_buffer *buffer = NULL;
220 	struct blk_io_trace *t;
221 	unsigned long flags = 0;
222 	unsigned long *sequence;
223 	pid_t pid;
224 	int cpu, pc = 0;
225 	bool blk_tracer = blk_tracer_enabled;
226 	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
227 
228 	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
229 		return;
230 
231 	what |= ddir_act[op_is_write(op) ? WRITE : READ];
232 	what |= MASK_TC_BIT(op_flags, SYNC);
233 	what |= MASK_TC_BIT(op_flags, RAHEAD);
234 	what |= MASK_TC_BIT(op_flags, META);
235 	what |= MASK_TC_BIT(op_flags, PREFLUSH);
236 	what |= MASK_TC_BIT(op_flags, FUA);
237 	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
238 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
239 	if (op == REQ_OP_FLUSH)
240 		what |= BLK_TC_ACT(BLK_TC_FLUSH);
241 	if (cgid)
242 		what |= __BLK_TA_CGROUP;
243 
244 	pid = tsk->pid;
245 	if (act_log_check(bt, what, sector, pid))
246 		return;
247 	cpu = raw_smp_processor_id();
248 
249 	if (blk_tracer) {
250 		tracing_record_cmdline(current);
251 
252 		buffer = blk_tr->trace_buffer.buffer;
253 		pc = preempt_count();
254 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
255 						  sizeof(*t) + pdu_len + cgid_len,
256 						  0, pc);
257 		if (!event)
258 			return;
259 		t = ring_buffer_event_data(event);
260 		goto record_it;
261 	}
262 
263 	if (unlikely(tsk->btrace_seq != blktrace_seq))
264 		trace_note_tsk(tsk);
265 
266 	/*
267 	 * A word about the locking here - we disable interrupts to reserve
268 	 * some space in the relay per-cpu buffer, to prevent an irq
269 	 * from coming in and stepping on our toes.
270 	 */
271 	local_irq_save(flags);
272 	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
273 	if (t) {
274 		sequence = per_cpu_ptr(bt->sequence, cpu);
275 
276 		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
277 		t->sequence = ++(*sequence);
278 		t->time = ktime_to_ns(ktime_get());
279 record_it:
280 		/*
281 		 * These two are not needed in ftrace as they are in the
282 		 * generic trace_entry, filled by tracing_generic_entry_update,
283 		 * but for the trace_event->bin() synthesizer benefit we do it
284 		 * here too.
285 		 */
286 		t->cpu = cpu;
287 		t->pid = pid;
288 
289 		t->sector = sector;
290 		t->bytes = bytes;
291 		t->action = what;
292 		t->device = bt->dev;
293 		t->error = error;
294 		t->pdu_len = pdu_len + cgid_len;
295 
296 		if (cgid_len)
297 			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
298 		if (pdu_len)
299 			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
300 
301 		if (blk_tracer) {
302 			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
303 			return;
304 		}
305 	}
306 
307 	local_irq_restore(flags);
308 }
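/*
 * Example of the assembled action word (illustrative): a queued
 * synchronous write enters this function as what == BLK_TA_QUEUE and
 * leaves with
 *
 *   what |= BLK_TC_ACT(BLK_TC_WRITE) | BLK_TC_ACT(BLK_TC_SYNC);
 *
 * so userspace can split t->action into an action code (low bits) and
 * a category mask (bits at and above BLK_TC_SHIFT).
 */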
309 
310 static void blk_trace_free(struct blk_trace *bt)
311 {
312 	debugfs_remove(bt->msg_file);
313 	debugfs_remove(bt->dropped_file);
314 	relay_close(bt->rchan);
315 	debugfs_remove(bt->dir);
316 	free_percpu(bt->sequence);
317 	free_percpu(bt->msg_data);
318 	kfree(bt);
319 }
320 
321 static void get_probe_ref(void)
322 {
323 	mutex_lock(&blk_probe_mutex);
324 	if (++blk_probes_ref == 1)
325 		blk_register_tracepoints();
326 	mutex_unlock(&blk_probe_mutex);
327 }
328 
329 static void put_probe_ref(void)
330 {
331 	mutex_lock(&blk_probe_mutex);
332 	if (!--blk_probes_ref)
333 		blk_unregister_tracepoints();
334 	mutex_unlock(&blk_probe_mutex);
335 }
336 
337 static void blk_trace_cleanup(struct blk_trace *bt)
338 {
339 	blk_trace_free(bt);
340 	put_probe_ref();
341 }
342 
343 static int __blk_trace_remove(struct request_queue *q)
344 {
345 	struct blk_trace *bt;
346 
347 	bt = xchg(&q->blk_trace, NULL);
348 	if (!bt)
349 		return -EINVAL;
350 
351 	if (bt->trace_state != Blktrace_running)
352 		blk_trace_cleanup(bt);
353 
354 	return 0;
355 }
356 
357 int blk_trace_remove(struct request_queue *q)
358 {
359 	int ret;
360 
361 	mutex_lock(&q->blk_trace_mutex);
362 	ret = __blk_trace_remove(q);
363 	mutex_unlock(&q->blk_trace_mutex);
364 
365 	return ret;
366 }
367 EXPORT_SYMBOL_GPL(blk_trace_remove);
368 
369 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
370 				size_t count, loff_t *ppos)
371 {
372 	struct blk_trace *bt = filp->private_data;
373 	char buf[16];
374 
375 	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
376 
377 	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
378 }
379 
380 static const struct file_operations blk_dropped_fops = {
381 	.owner =	THIS_MODULE,
382 	.open =		simple_open,
383 	.read =		blk_dropped_read,
384 	.llseek =	default_llseek,
385 };
386 
387 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
388 				size_t count, loff_t *ppos)
389 {
390 	char *msg;
391 	struct blk_trace *bt;
392 
393 	if (count >= BLK_TN_MAX_MSG)
394 		return -EINVAL;
395 
396 	msg = memdup_user_nul(buffer, count);
397 	if (IS_ERR(msg))
398 		return PTR_ERR(msg);
399 
400 	bt = filp->private_data;
401 	__trace_note_message(bt, NULL, "%s", msg);
402 	kfree(msg);
403 
404 	return count;
405 }
406 
407 static const struct file_operations blk_msg_fops = {
408 	.owner =	THIS_MODULE,
409 	.open =		simple_open,
410 	.write =	blk_msg_write,
411 	.llseek =	noop_llseek,
412 };
413 
414 /*
415  * Keep track of how many times we encountered a full subbuffer, to aid
416  * the user space app in telling how many lost events there were.
417  */
418 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
419 				     void *prev_subbuf, size_t prev_padding)
420 {
421 	struct blk_trace *bt;
422 
423 	if (!relay_buf_full(buf))
424 		return 1;
425 
426 	bt = buf->chan->private_data;
427 	atomic_inc(&bt->dropped);
428 	return 0;
429 }
430 
431 static int blk_remove_buf_file_callback(struct dentry *dentry)
432 {
433 	debugfs_remove(dentry);
434 
435 	return 0;
436 }
437 
438 static struct dentry *blk_create_buf_file_callback(const char *filename,
439 						   struct dentry *parent,
440 						   umode_t mode,
441 						   struct rchan_buf *buf,
442 						   int *is_global)
443 {
444 	return debugfs_create_file(filename, mode, parent, buf,
445 					&relay_file_operations);
446 }
447 
448 static struct rchan_callbacks blk_relay_callbacks = {
449 	.subbuf_start		= blk_subbuf_start_callback,
450 	.create_buf_file	= blk_create_buf_file_callback,
451 	.remove_buf_file	= blk_remove_buf_file_callback,
452 };
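/*
 * Resulting debugfs layout (sketch, assuming debugfs is mounted at
 * /sys/kernel/debug): relay_open() with these callbacks creates one
 * per-cpu buffer file that blktrace(8) reads directly, alongside the
 * control files created in do_blk_trace_setup() below:
 *
 *   /sys/kernel/debug/block/<name>/trace0 ... trace<N>
 *   /sys/kernel/debug/block/<name>/dropped
 *   /sys/kernel/debug/block/<name>/msg
 */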
453 
454 static void blk_trace_setup_lba(struct blk_trace *bt,
455 				struct block_device *bdev)
456 {
457 	struct hd_struct *part = NULL;
458 
459 	if (bdev)
460 		part = bdev->bd_part;
461 
462 	if (part) {
463 		bt->start_lba = part->start_sect;
464 		bt->end_lba = part->start_sect + part->nr_sects;
465 	} else {
466 		bt->start_lba = 0;
467 		bt->end_lba = -1ULL;
468 	}
469 }
470 
471 /*
472  * Set up everything required to start tracing
473  */
474 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
475 			      struct block_device *bdev,
476 			      struct blk_user_trace_setup *buts)
477 {
478 	struct blk_trace *bt = NULL;
479 	struct dentry *dir = NULL;
480 	int ret;
481 
482 	if (!buts->buf_size || !buts->buf_nr)
483 		return -EINVAL;
484 
485 	if (!blk_debugfs_root)
486 		return -ENOENT;
487 
488 	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
489 	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
490 
491 	/*
492 	 * some device names contain slashes (e.g. device-mapper paths) -
493 	 * convert them to underscores so the name is a valid debugfs
494 	 * directory name
494 	 */
495 	strreplace(buts->name, '/', '_');
496 
497 	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
498 	if (!bt)
499 		return -ENOMEM;
500 
501 	ret = -ENOMEM;
502 	bt->sequence = alloc_percpu(unsigned long);
503 	if (!bt->sequence)
504 		goto err;
505 
506 	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
507 	if (!bt->msg_data)
508 		goto err;
509 
510 	ret = -ENOENT;
511 
512 	dir = debugfs_lookup(buts->name, blk_debugfs_root);
513 	if (!dir)
514 		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
515 
516 	bt->dev = dev;
517 	atomic_set(&bt->dropped, 0);
518 	INIT_LIST_HEAD(&bt->running_list);
519 
520 	ret = -EIO;
521 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
522 					       &blk_dropped_fops);
523 
524 	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
525 
526 	bt->rchan = relay_open("trace", dir, buts->buf_size,
527 				buts->buf_nr, &blk_relay_callbacks, bt);
528 	if (!bt->rchan)
529 		goto err;
530 
531 	bt->act_mask = buts->act_mask;
532 	if (!bt->act_mask)
533 		bt->act_mask = (u16) -1;
534 
535 	blk_trace_setup_lba(bt, bdev);
536 
537 	/* overwrite with user settings */
538 	if (buts->start_lba)
539 		bt->start_lba = buts->start_lba;
540 	if (buts->end_lba)
541 		bt->end_lba = buts->end_lba;
542 
543 	bt->pid = buts->pid;
544 	bt->trace_state = Blktrace_setup;
545 
546 	ret = -EBUSY;
547 	if (cmpxchg(&q->blk_trace, NULL, bt))
548 		goto err;
549 
550 	get_probe_ref();
551 
552 	ret = 0;
553 err:
554 	if (dir && !bt->dir)
555 		dput(dir);
556 	if (ret)
557 		blk_trace_free(bt);
558 	return ret;
559 }
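/*
 * Minimal userspace sketch of driving this setup path via the ioctl
 * (illustrative, error handling elided; fd is an open block device):
 *
 *   struct blk_user_trace_setup buts = {
 *           .buf_size = 512 * 1024,  // bytes per relay subbuffer
 *           .buf_nr   = 4,           // number of subbuffers
 *           .act_mask = 0,           // 0 means "trace everything"
 *   };
 *   ioctl(fd, BLKTRACESETUP, &buts); // kernel fills in buts.name
 *   ioctl(fd, BLKTRACESTART, 0);
 */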
560 
561 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
562 			     struct block_device *bdev, char __user *arg)
563 {
564 	struct blk_user_trace_setup buts;
565 	int ret;
566 
567 	ret = copy_from_user(&buts, arg, sizeof(buts));
568 	if (ret)
569 		return -EFAULT;
570 
571 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
572 	if (ret)
573 		return ret;
574 
575 	if (copy_to_user(arg, &buts, sizeof(buts))) {
576 		__blk_trace_remove(q);
577 		return -EFAULT;
578 	}
579 	return 0;
580 }
581 
582 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
583 		    struct block_device *bdev,
584 		    char __user *arg)
585 {
586 	int ret;
587 
588 	mutex_lock(&q->blk_trace_mutex);
589 	ret = __blk_trace_setup(q, name, dev, bdev, arg);
590 	mutex_unlock(&q->blk_trace_mutex);
591 
592 	return ret;
593 }
594 EXPORT_SYMBOL_GPL(blk_trace_setup);
595 
596 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
597 static int compat_blk_trace_setup(struct request_queue *q, char *name,
598 				  dev_t dev, struct block_device *bdev,
599 				  char __user *arg)
600 {
601 	struct blk_user_trace_setup buts;
602 	struct compat_blk_user_trace_setup cbuts;
603 	int ret;
604 
605 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
606 		return -EFAULT;
607 
608 	buts = (struct blk_user_trace_setup) {
609 		.act_mask = cbuts.act_mask,
610 		.buf_size = cbuts.buf_size,
611 		.buf_nr = cbuts.buf_nr,
612 		.start_lba = cbuts.start_lba,
613 		.end_lba = cbuts.end_lba,
614 		.pid = cbuts.pid,
615 	};
616 
617 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
618 	if (ret)
619 		return ret;
620 
621 	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
622 		__blk_trace_remove(q);
623 		return -EFAULT;
624 	}
625 
626 	return 0;
627 }
628 #endif
629 
630 static int __blk_trace_startstop(struct request_queue *q, int start)
631 {
632 	int ret;
633 	struct blk_trace *bt = q->blk_trace;
634 
635 	if (bt == NULL)
636 		return -EINVAL;
637 
638 	/*
639 	 * For starting a trace, we can transition from a setup or stopped
640 	 * trace. For stopping a trace, the state must be running.
641 	 */
642 	ret = -EINVAL;
643 	if (start) {
644 		if (bt->trace_state == Blktrace_setup ||
645 		    bt->trace_state == Blktrace_stopped) {
646 			blktrace_seq++;
647 			smp_mb();
648 			bt->trace_state = Blktrace_running;
649 			spin_lock_irq(&running_trace_lock);
650 			list_add(&bt->running_list, &running_trace_list);
651 			spin_unlock_irq(&running_trace_lock);
652 
653 			trace_note_time(bt);
654 			ret = 0;
655 		}
656 	} else {
657 		if (bt->trace_state == Blktrace_running) {
658 			bt->trace_state = Blktrace_stopped;
659 			spin_lock_irq(&running_trace_lock);
660 			list_del_init(&bt->running_list);
661 			spin_unlock_irq(&running_trace_lock);
662 			relay_flush(bt->rchan);
663 			ret = 0;
664 		}
665 	}
666 
667 	return ret;
668 }
669 
670 int blk_trace_startstop(struct request_queue *q, int start)
671 {
672 	int ret;
673 
674 	mutex_lock(&q->blk_trace_mutex);
675 	ret = __blk_trace_startstop(q, start);
676 	mutex_unlock(&q->blk_trace_mutex);
677 
678 	return ret;
679 }
680 EXPORT_SYMBOL_GPL(blk_trace_startstop);
681 
682 /*
683  * When reading or writing the blktrace sysfs files, the references to the
684  * opened sysfs or device files should prevent the underlying block device
685  * from being removed. So no further delete protection is really needed.
686  */
687 
688 /**
689  * blk_trace_ioctl - handle the ioctls associated with tracing
690  * @bdev:	the block device
691  * @cmd:	the ioctl cmd
692  * @arg:	the argument data, if any
693  *
694  **/
695 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
696 {
697 	struct request_queue *q;
698 	int ret, start = 0;
699 	char b[BDEVNAME_SIZE];
700 
701 	q = bdev_get_queue(bdev);
702 	if (!q)
703 		return -ENXIO;
704 
705 	mutex_lock(&q->blk_trace_mutex);
706 
707 	switch (cmd) {
708 	case BLKTRACESETUP:
709 		bdevname(bdev, b);
710 		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
711 		break;
712 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
713 	case BLKTRACESETUP32:
714 		bdevname(bdev, b);
715 		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
716 		break;
717 #endif
718 	case BLKTRACESTART:
719 		start = 1;
720 		/* fall through */
721 	case BLKTRACESTOP:
722 		ret = __blk_trace_startstop(q, start);
723 		break;
724 	case BLKTRACETEARDOWN:
725 		ret = __blk_trace_remove(q);
726 		break;
727 	default:
728 		ret = -ENOTTY;
729 		break;
730 	}
731 
732 	mutex_unlock(&q->blk_trace_mutex);
733 	return ret;
734 }
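/*
 * These are the same ioctls the blktrace(8) utility issues under the
 * hood; a typical command-line session (illustrative) is:
 *
 *   # blktrace -d /dev/sda -o - | blkparse -i -
 */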
735 
736 /**
737  * blk_trace_shutdown - stop and clean up trace structures
738  * @q:    the request queue associated with the device
739  *
740  **/
741 void blk_trace_shutdown(struct request_queue *q)
742 {
743 	mutex_lock(&q->blk_trace_mutex);
744 
745 	if (q->blk_trace) {
746 		__blk_trace_startstop(q, 0);
747 		__blk_trace_remove(q);
748 	}
749 
750 	mutex_unlock(&q->blk_trace_mutex);
751 }
752 
753 #ifdef CONFIG_BLK_CGROUP
754 static union kernfs_node_id *
755 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
756 {
757 	struct blk_trace *bt = q->blk_trace;
758 
759 	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
760 		return NULL;
761 
762 	if (!bio->bi_blkg)
763 		return NULL;
764 	return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
765 }
766 #else
767 static union kernfs_node_id *
768 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
769 {
770 	return NULL;
771 }
772 #endif
773 
774 static union kernfs_node_id *
775 blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
776 {
777 	if (!rq->bio)
778 		return NULL;
779 	/* Use the first bio */
780 	return blk_trace_bio_get_cgid(q, rq->bio);
781 }
782 
783 /*
784  * blktrace probes
785  */
786 
787 /**
788  * blk_add_trace_rq - Add a trace for a request oriented action
789  * @rq:		the source request
790  * @error:	return status to log
791  * @nr_bytes:	number of completed bytes
792  * @what:	the action
793  * @cgid:	the cgroup info
794  *
795  * Description:
796  *     Records an action against a request. Will log the bio offset + size.
797  *
798  **/
799 static void blk_add_trace_rq(struct request *rq, int error,
800 			     unsigned int nr_bytes, u32 what,
801 			     union kernfs_node_id *cgid)
802 {
803 	struct blk_trace *bt = rq->q->blk_trace;
804 
805 	if (likely(!bt))
806 		return;
807 
808 	if (blk_rq_is_passthrough(rq))
809 		what |= BLK_TC_ACT(BLK_TC_PC);
810 	else
811 		what |= BLK_TC_ACT(BLK_TC_FS);
812 
813 	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
814 			rq->cmd_flags, what, error, 0, NULL, cgid);
815 }
816 
817 static void blk_add_trace_rq_insert(void *ignore,
818 				    struct request_queue *q, struct request *rq)
819 {
820 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
821 			 blk_trace_request_get_cgid(q, rq));
822 }
823 
824 static void blk_add_trace_rq_issue(void *ignore,
825 				   struct request_queue *q, struct request *rq)
826 {
827 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
828 			 blk_trace_request_get_cgid(q, rq));
829 }
830 
831 static void blk_add_trace_rq_requeue(void *ignore,
832 				     struct request_queue *q,
833 				     struct request *rq)
834 {
835 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
836 			 blk_trace_request_get_cgid(q, rq));
837 }
838 
839 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
840 			int error, unsigned int nr_bytes)
841 {
842 	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
843 			 blk_trace_request_get_cgid(rq->q, rq));
844 }
845 
846 /**
847  * blk_add_trace_bio - Add a trace for a bio oriented action
848  * @q:		queue the io is for
849  * @bio:	the source bio
850  * @what:	the action
851  * @error:	error, if any
852  *
853  * Description:
854  *     Records an action against a bio. Will log the bio offset + size.
855  *
856  **/
857 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
858 			      u32 what, int error)
859 {
860 	struct blk_trace *bt = q->blk_trace;
861 
862 	if (likely(!bt))
863 		return;
864 
865 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
866 			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
867 			blk_trace_bio_get_cgid(q, bio));
868 }
869 
870 static void blk_add_trace_bio_bounce(void *ignore,
871 				     struct request_queue *q, struct bio *bio)
872 {
873 	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
874 }
875 
876 static void blk_add_trace_bio_complete(void *ignore,
877 				       struct request_queue *q, struct bio *bio,
878 				       int error)
879 {
880 	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
881 }
882 
883 static void blk_add_trace_bio_backmerge(void *ignore,
884 					struct request_queue *q,
885 					struct request *rq,
886 					struct bio *bio)
887 {
888 	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
889 }
890 
891 static void blk_add_trace_bio_frontmerge(void *ignore,
892 					 struct request_queue *q,
893 					 struct request *rq,
894 					 struct bio *bio)
895 {
896 	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
897 }
898 
899 static void blk_add_trace_bio_queue(void *ignore,
900 				    struct request_queue *q, struct bio *bio)
901 {
902 	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
903 }
904 
905 static void blk_add_trace_getrq(void *ignore,
906 				struct request_queue *q,
907 				struct bio *bio, int rw)
908 {
909 	if (bio)
910 		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
911 	else {
912 		struct blk_trace *bt = q->blk_trace;
913 
914 		if (bt)
915 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
916 					NULL, NULL);
917 	}
918 }
919 
920 
921 static void blk_add_trace_sleeprq(void *ignore,
922 				  struct request_queue *q,
923 				  struct bio *bio, int rw)
924 {
925 	if (bio)
926 		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
927 	else {
928 		struct blk_trace *bt = q->blk_trace;
929 
930 		if (bt)
931 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
932 					0, 0, NULL, NULL);
933 	}
934 }
935 
936 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
937 {
938 	struct blk_trace *bt = q->blk_trace;
939 
940 	if (bt)
941 		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
942 }
943 
944 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
945 				    unsigned int depth, bool explicit)
946 {
947 	struct blk_trace *bt = q->blk_trace;
948 
949 	if (bt) {
950 		__be64 rpdu = cpu_to_be64(depth);
951 		u32 what;
952 
953 		if (explicit)
954 			what = BLK_TA_UNPLUG_IO;
955 		else
956 			what = BLK_TA_UNPLUG_TIMER;
957 
958 		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
959 	}
960 }
961 
962 static void blk_add_trace_split(void *ignore,
963 				struct request_queue *q, struct bio *bio,
964 				unsigned int pdu)
965 {
966 	struct blk_trace *bt = q->blk_trace;
967 
968 	if (bt) {
969 		__be64 rpdu = cpu_to_be64(pdu);
970 
971 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
972 				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
973 				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
974 				&rpdu, blk_trace_bio_get_cgid(q, bio));
975 	}
976 }
977 
978 /**
979  * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
980  * @ignore:	trace callback data parameter (not used)
981  * @q:		queue the io is for
982  * @bio:	the source bio
983  * @dev:	target device
984  * @from:	source sector
985  *
986  * Description:
987  *     Device mapper or raid targets sometimes need to split a bio because
988  *     it spans a stripe (or similar). Add a trace for that action.
989  *
990  **/
991 static void blk_add_trace_bio_remap(void *ignore,
992 				    struct request_queue *q, struct bio *bio,
993 				    dev_t dev, sector_t from)
994 {
995 	struct blk_trace *bt = q->blk_trace;
996 	struct blk_io_trace_remap r;
997 
998 	if (likely(!bt))
999 		return;
1000 
1001 	r.device_from = cpu_to_be32(dev);
1002 	r.device_to   = cpu_to_be32(bio_dev(bio));
1003 	r.sector_from = cpu_to_be64(from);
1004 
1005 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1006 			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
1007 			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1008 }
1009 
1010 /**
1011  * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1012  * @ignore:	trace callback data parameter (not used)
1013  * @q:		queue the io is for
1014  * @rq:		the source request
1015  * @dev:	target device
1016  * @from:	source sector
1017  *
1018  * Description:
1019  *     Device mapper remaps requests to other devices.
1020  *     Add a trace for that action.
1021  *
1022  **/
1023 static void blk_add_trace_rq_remap(void *ignore,
1024 				   struct request_queue *q,
1025 				   struct request *rq, dev_t dev,
1026 				   sector_t from)
1027 {
1028 	struct blk_trace *bt = q->blk_trace;
1029 	struct blk_io_trace_remap r;
1030 
1031 	if (likely(!bt))
1032 		return;
1033 
1034 	r.device_from = cpu_to_be32(dev);
1035 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
1036 	r.sector_from = cpu_to_be64(from);
1037 
1038 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1039 			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
1040 			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
1041 }
1042 
1043 /**
1044  * blk_add_driver_data - Add binary message with driver-specific data
1045  * @q:		queue the io is for
1046  * @rq:		io request
1047  * @data:	driver-specific data
1048  * @len:	length of driver-specific data
1049  *
1050  * Description:
1051  *     Some drivers might want to write driver-specific data per request.
1052  *
1053  **/
1054 void blk_add_driver_data(struct request_queue *q,
1055 			 struct request *rq,
1056 			 void *data, size_t len)
1057 {
1058 	struct blk_trace *bt = q->blk_trace;
1059 
1060 	if (likely(!bt))
1061 		return;
1062 
1063 	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
1064 				BLK_TA_DRV_DATA, 0, len, data,
1065 				blk_trace_request_get_cgid(q, rq));
1066 }
1067 EXPORT_SYMBOL_GPL(blk_add_driver_data);
1068 
1069 static void blk_register_tracepoints(void)
1070 {
1071 	int ret;
1072 
1073 	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1074 	WARN_ON(ret);
1075 	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1076 	WARN_ON(ret);
1077 	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1078 	WARN_ON(ret);
1079 	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1080 	WARN_ON(ret);
1081 	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1082 	WARN_ON(ret);
1083 	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1084 	WARN_ON(ret);
1085 	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1086 	WARN_ON(ret);
1087 	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1088 	WARN_ON(ret);
1089 	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1090 	WARN_ON(ret);
1091 	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1092 	WARN_ON(ret);
1093 	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1094 	WARN_ON(ret);
1095 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1096 	WARN_ON(ret);
1097 	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1098 	WARN_ON(ret);
1099 	ret = register_trace_block_split(blk_add_trace_split, NULL);
1100 	WARN_ON(ret);
1101 	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1102 	WARN_ON(ret);
1103 	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1104 	WARN_ON(ret);
1105 }
1106 
1107 static void blk_unregister_tracepoints(void)
1108 {
1109 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1110 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1111 	unregister_trace_block_split(blk_add_trace_split, NULL);
1112 	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1113 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
1114 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1115 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1116 	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1117 	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1118 	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1119 	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1120 	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1121 	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1122 	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1123 	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1124 	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1125 
1126 	tracepoint_synchronize_unregister();
1127 }
1128 
1129 /*
1130  * struct blk_io_trace formatting routines
1131  */
1132 
1133 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1134 {
1135 	int i = 0;
1136 	int tc = t->action >> BLK_TC_SHIFT;
1137 
1138 	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1139 		rwbs[i++] = 'N';
1140 		goto out;
1141 	}
1142 
1143 	if (tc & BLK_TC_FLUSH)
1144 		rwbs[i++] = 'F';
1145 
1146 	if (tc & BLK_TC_DISCARD)
1147 		rwbs[i++] = 'D';
1148 	else if (tc & BLK_TC_WRITE)
1149 		rwbs[i++] = 'W';
1150 	else if (t->bytes)
1151 		rwbs[i++] = 'R';
1152 	else
1153 		rwbs[i++] = 'N';
1154 
1155 	if (tc & BLK_TC_FUA)
1156 		rwbs[i++] = 'F';
1157 	if (tc & BLK_TC_AHEAD)
1158 		rwbs[i++] = 'A';
1159 	if (tc & BLK_TC_SYNC)
1160 		rwbs[i++] = 'S';
1161 	if (tc & BLK_TC_META)
1162 		rwbs[i++] = 'M';
1163 out:
1164 	rwbs[i] = '\0';
1165 }
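/*
 * Example outputs (derived from the branches above): a plain read
 * yields "R", a synchronous FUA write "WFS", a discard "D", and a
 * notify message just "N".
 */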
1166 
1167 static inline
1168 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1169 {
1170 	return (const struct blk_io_trace *)ent;
1171 }
1172 
1173 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1174 {
1175 	return (void *)(te_blk_io_trace(ent) + 1) +
1176 		(has_cg ? sizeof(union kernfs_node_id) : 0);
1177 }
1178 
1179 static inline const void *cgid_start(const struct trace_entry *ent)
1180 {
1181 	return (void *)(te_blk_io_trace(ent) + 1);
1182 }
1183 
1184 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1185 {
1186 	return te_blk_io_trace(ent)->pdu_len -
1187 			(has_cg ? sizeof(union kernfs_node_id) : 0);
1188 }
1189 
1190 static inline u32 t_action(const struct trace_entry *ent)
1191 {
1192 	return te_blk_io_trace(ent)->action;
1193 }
1194 
1195 static inline u32 t_bytes(const struct trace_entry *ent)
1196 {
1197 	return te_blk_io_trace(ent)->bytes;
1198 }
1199 
1200 static inline u32 t_sec(const struct trace_entry *ent)
1201 {
1202 	return te_blk_io_trace(ent)->bytes >> 9;
1203 }
1204 
1205 static inline unsigned long long t_sector(const struct trace_entry *ent)
1206 {
1207 	return te_blk_io_trace(ent)->sector;
1208 }
1209 
1210 static inline __u16 t_error(const struct trace_entry *ent)
1211 {
1212 	return te_blk_io_trace(ent)->error;
1213 }
1214 
1215 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1216 {
1217 	const __u64 *val = pdu_start(ent, has_cg);
1218 	return be64_to_cpu(*val);
1219 }
1220 
1221 static void get_pdu_remap(const struct trace_entry *ent,
1222 			  struct blk_io_trace_remap *r, bool has_cg)
1223 {
1224 	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1225 	__u64 sector_from = __r->sector_from;
1226 
1227 	r->device_from = be32_to_cpu(__r->device_from);
1228 	r->device_to   = be32_to_cpu(__r->device_to);
1229 	r->sector_from = be64_to_cpu(sector_from);
1230 }
1231 
1232 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1233 	bool has_cg);
1234 
1235 static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1236 	bool has_cg)
1237 {
1238 	char rwbs[RWBS_LEN];
1239 	unsigned long long ts  = iter->ts;
1240 	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1241 	unsigned secs	       = (unsigned long)ts;
1242 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1243 
1244 	fill_rwbs(rwbs, t);
1245 
1246 	trace_seq_printf(&iter->seq,
1247 			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1248 			 MAJOR(t->device), MINOR(t->device), iter->cpu,
1249 			 secs, nsec_rem, iter->ent->pid, act, rwbs);
1250 }
1251 
1252 static void blk_log_action(struct trace_iterator *iter, const char *act,
1253 	bool has_cg)
1254 {
1255 	char rwbs[RWBS_LEN];
1256 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1257 
1258 	fill_rwbs(rwbs, t);
1259 	if (has_cg) {
1260 		const union kernfs_node_id *id = cgid_start(iter->ent);
1261 
1262 		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1263 			char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1264 
1265 			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1266 				sizeof(blkcg_name_buf));
1267 			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1268 				 MAJOR(t->device), MINOR(t->device),
1269 				 blkcg_name_buf, act, rwbs);
1270 		} else
1271 			trace_seq_printf(&iter->seq,
1272 				 "%3d,%-3d %x,%-x %2s %3s ",
1273 				 MAJOR(t->device), MINOR(t->device),
1274 				 id->ino, id->generation, act, rwbs);
1275 	} else
1276 		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1277 				 MAJOR(t->device), MINOR(t->device), act, rwbs);
1278 }
1279 
1280 static void blk_log_dump_pdu(struct trace_seq *s,
1281 	const struct trace_entry *ent, bool has_cg)
1282 {
1283 	const unsigned char *pdu_buf;
1284 	int pdu_len;
1285 	int i, end;
1286 
1287 	pdu_buf = pdu_start(ent, has_cg);
1288 	pdu_len = pdu_real_len(ent, has_cg);
1289 
1290 	if (!pdu_len)
1291 		return;
1292 
1293 	/* find the last byte to print: the first of any trailing zeroes */
1294 	for (end = pdu_len - 1; end >= 0; end--)
1295 		if (pdu_buf[end])
1296 			break;
1297 	end++;
1298 
1299 	trace_seq_putc(s, '(');
1300 
1301 	for (i = 0; i < pdu_len; i++) {
1302 
1303 		trace_seq_printf(s, "%s%02x",
1304 				 i == 0 ? "" : " ", pdu_buf[i]);
1305 
1306 		/*
1307 		 * stop when the rest is just zeroes and indicate so
1308 		 * with a ".." appended
1309 		 */
1310 		if (i == end && end != pdu_len - 1) {
1311 			trace_seq_puts(s, " ..) ");
1312 			return;
1313 		}
1314 	}
1315 
1316 	trace_seq_puts(s, ") ");
1317 }
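/*
 * Example (illustrative): a PDU of { 0x12, 0x34, 0x00, 0x00, 0x00 }
 * prints as "(12 34 00 ..) " - the first trailing zero is kept and
 * the remaining zeroes are elided with "..".
 */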
1318 
1319 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1320 {
1321 	char cmd[TASK_COMM_LEN];
1322 
1323 	trace_find_cmdline(ent->pid, cmd);
1324 
1325 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1326 		trace_seq_printf(s, "%u ", t_bytes(ent));
1327 		blk_log_dump_pdu(s, ent, has_cg);
1328 		trace_seq_printf(s, "[%s]\n", cmd);
1329 	} else {
1330 		if (t_sec(ent))
1331 			trace_seq_printf(s, "%llu + %u [%s]\n",
1332 						t_sector(ent), t_sec(ent), cmd);
1333 		else
1334 			trace_seq_printf(s, "[%s]\n", cmd);
1335 	}
1336 }
1337 
1338 static void blk_log_with_error(struct trace_seq *s,
1339 			      const struct trace_entry *ent, bool has_cg)
1340 {
1341 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1342 		blk_log_dump_pdu(s, ent, has_cg);
1343 		trace_seq_printf(s, "[%d]\n", t_error(ent));
1344 	} else {
1345 		if (t_sec(ent))
1346 			trace_seq_printf(s, "%llu + %u [%d]\n",
1347 					 t_sector(ent),
1348 					 t_sec(ent), t_error(ent));
1349 		else
1350 			trace_seq_printf(s, "%llu [%d]\n",
1351 					 t_sector(ent), t_error(ent));
1352 	}
1353 }
1354 
1355 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1356 {
1357 	struct blk_io_trace_remap r = { .device_from = 0, };
1358 
1359 	get_pdu_remap(ent, &r, has_cg);
1360 	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1361 			 t_sector(ent), t_sec(ent),
1362 			 MAJOR(r.device_from), MINOR(r.device_from),
1363 			 (unsigned long long)r.sector_from);
1364 }
1365 
1366 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1367 {
1368 	char cmd[TASK_COMM_LEN];
1369 
1370 	trace_find_cmdline(ent->pid, cmd);
1371 
1372 	trace_seq_printf(s, "[%s]\n", cmd);
1373 }
1374 
1375 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1376 {
1377 	char cmd[TASK_COMM_LEN];
1378 
1379 	trace_find_cmdline(ent->pid, cmd);
1380 
1381 	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1382 }
1383 
1384 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1385 {
1386 	char cmd[TASK_COMM_LEN];
1387 
1388 	trace_find_cmdline(ent->pid, cmd);
1389 
1390 	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1391 			 get_pdu_int(ent, has_cg), cmd);
1392 }
1393 
1394 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1395 			bool has_cg)
1396 {
1397 
1398 	trace_seq_putmem(s, pdu_start(ent, has_cg),
1399 		pdu_real_len(ent, has_cg));
1400 	trace_seq_putc(s, '\n');
1401 }
1402 
1403 /*
1404  * struct tracer operations
1405  */
1406 
1407 static void blk_tracer_print_header(struct seq_file *m)
1408 {
1409 	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1410 		return;
1411 	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
1412 		    "#  |     |     |           |   |   |\n");
1413 }
1414 
1415 static void blk_tracer_start(struct trace_array *tr)
1416 {
1417 	blk_tracer_enabled = true;
1418 }
1419 
1420 static int blk_tracer_init(struct trace_array *tr)
1421 {
1422 	blk_tr = tr;
1423 	blk_tracer_start(tr);
1424 	return 0;
1425 }
1426 
1427 static void blk_tracer_stop(struct trace_array *tr)
1428 {
1429 	blk_tracer_enabled = false;
1430 }
1431 
1432 static void blk_tracer_reset(struct trace_array *tr)
1433 {
1434 	blk_tracer_stop(tr);
1435 }
1436 
1437 static const struct {
1438 	const char *act[2];
1439 	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
1440 			    bool has_cg);
1441 } what2act[] = {
1442 	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
1443 	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
1444 	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
1445 	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
1446 	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
1447 	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
1448 	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
1449 	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
1450 	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
1451 	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
1452 	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
1453 	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
1454 	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
1455 	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
1456 	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
1457 };
1458 
1459 static enum print_line_t print_one_line(struct trace_iterator *iter,
1460 					bool classic)
1461 {
1462 	struct trace_array *tr = iter->tr;
1463 	struct trace_seq *s = &iter->seq;
1464 	const struct blk_io_trace *t;
1465 	u16 what;
1466 	bool long_act;
1467 	blk_log_action_t *log_action;
1468 	bool has_cg;
1469 
1470 	t	   = te_blk_io_trace(iter->ent);
1471 	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1472 	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1473 	log_action = classic ? &blk_log_action_classic : &blk_log_action;
1474 	has_cg	   = t->action & __BLK_TA_CGROUP;
1475 
1476 	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1477 		log_action(iter, long_act ? "message" : "m", has_cg);
1478 		blk_log_msg(s, iter->ent, has_cg);
1479 		return trace_handle_return(s);
1480 	}
1481 
1482 	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1483 		trace_seq_printf(s, "Unknown action %x\n", what);
1484 	else {
1485 		log_action(iter, what2act[what].act[long_act], has_cg);
1486 		what2act[what].print(s, iter->ent, has_cg);
1487 	}
1488 
1489 	return trace_handle_return(s);
1490 }
1491 
1492 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1493 					       int flags, struct trace_event *event)
1494 {
1495 	return print_one_line(iter, false);
1496 }
1497 
1498 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1499 {
1500 	struct trace_seq *s = &iter->seq;
1501 	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1502 	const int offset = offsetof(struct blk_io_trace, sector);
1503 	struct blk_io_trace old = {
1504 		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1505 		.time     = iter->ts,
1506 	};
1507 
1508 	trace_seq_putmem(s, &old, offset);
1509 	trace_seq_putmem(s, &t->sector,
1510 			 sizeof(old) - offset + t->pdu_len);
1511 }
1512 
1513 static enum print_line_t
1514 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1515 			     struct trace_event *event)
1516 {
1517 	blk_trace_synthesize_old_trace(iter);
1518 
1519 	return trace_handle_return(&iter->seq);
1520 }
1521 
1522 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1523 {
1524 	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1525 		return TRACE_TYPE_UNHANDLED;
1526 
1527 	return print_one_line(iter, true);
1528 }
1529 
1530 static int
1531 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1532 {
1533 	/* don't output context-info for blk_classic output */
1534 	if (bit == TRACE_BLK_OPT_CLASSIC) {
1535 		if (set)
1536 			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1537 		else
1538 			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1539 	}
1540 	return 0;
1541 }
1542 
1543 static struct tracer blk_tracer __read_mostly = {
1544 	.name		= "blk",
1545 	.init		= blk_tracer_init,
1546 	.reset		= blk_tracer_reset,
1547 	.start		= blk_tracer_start,
1548 	.stop		= blk_tracer_stop,
1549 	.print_header	= blk_tracer_print_header,
1550 	.print_line	= blk_tracer_print_line,
1551 	.flags		= &blk_tracer_flags,
1552 	.set_flag	= blk_tracer_set_flag,
1553 };
1554 
1555 static struct trace_event_functions trace_blk_event_funcs = {
1556 	.trace		= blk_trace_event_print,
1557 	.binary		= blk_trace_event_print_binary,
1558 };
1559 
1560 static struct trace_event trace_blk_event = {
1561 	.type		= TRACE_BLK,
1562 	.funcs		= &trace_blk_event_funcs,
1563 };
1564 
1565 static int __init init_blk_tracer(void)
1566 {
1567 	if (!register_trace_event(&trace_blk_event)) {
1568 		pr_warn("Warning: could not register block events\n");
1569 		return 1;
1570 	}
1571 
1572 	if (register_tracer(&blk_tracer) != 0) {
1573 		pr_warn("Warning: could not register the block tracer\n");
1574 		unregister_trace_event(&trace_blk_event);
1575 		return 1;
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 device_initcall(init_blk_tracer);
1582 
1583 static int blk_trace_remove_queue(struct request_queue *q)
1584 {
1585 	struct blk_trace *bt;
1586 
1587 	bt = xchg(&q->blk_trace, NULL);
1588 	if (bt == NULL)
1589 		return -EINVAL;
1590 
1591 	put_probe_ref();
1592 	blk_trace_free(bt);
1593 	return 0;
1594 }
1595 
1596 /*
1597  * Setup everything required to start tracing
1598  */
1599 static int blk_trace_setup_queue(struct request_queue *q,
1600 				 struct block_device *bdev)
1601 {
1602 	struct blk_trace *bt = NULL;
1603 	int ret = -ENOMEM;
1604 
1605 	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1606 	if (!bt)
1607 		return -ENOMEM;
1608 
1609 	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1610 	if (!bt->msg_data)
1611 		goto free_bt;
1612 
1613 	bt->dev = bdev->bd_dev;
1614 	bt->act_mask = (u16)-1;
1615 
1616 	blk_trace_setup_lba(bt, bdev);
1617 
1618 	ret = -EBUSY;
1619 	if (cmpxchg(&q->blk_trace, NULL, bt))
1620 		goto free_bt;
1621 
1622 	get_probe_ref();
1623 	return 0;
1624 
1625 free_bt:
1626 	blk_trace_free(bt);
1627 	return ret;
1628 }
1629 
1630 /*
1631  * sysfs interface to enable and configure tracing
1632  */
1633 
1634 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1635 					 struct device_attribute *attr,
1636 					 char *buf);
1637 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1638 					  struct device_attribute *attr,
1639 					  const char *buf, size_t count);
1640 #define BLK_TRACE_DEVICE_ATTR(_name) \
1641 	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1642 		    sysfs_blk_trace_attr_show, \
1643 		    sysfs_blk_trace_attr_store)
1644 
1645 static BLK_TRACE_DEVICE_ATTR(enable);
1646 static BLK_TRACE_DEVICE_ATTR(act_mask);
1647 static BLK_TRACE_DEVICE_ATTR(pid);
1648 static BLK_TRACE_DEVICE_ATTR(start_lba);
1649 static BLK_TRACE_DEVICE_ATTR(end_lba);
1650 
1651 static struct attribute *blk_trace_attrs[] = {
1652 	&dev_attr_enable.attr,
1653 	&dev_attr_act_mask.attr,
1654 	&dev_attr_pid.attr,
1655 	&dev_attr_start_lba.attr,
1656 	&dev_attr_end_lba.attr,
1657 	NULL
1658 };
1659 
1660 struct attribute_group blk_trace_attr_group = {
1661 	.name  = "trace",
1662 	.attrs = blk_trace_attrs,
1663 };
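/*
 * Shell sketch of this sysfs interface (paths assume the standard
 * /sys/block layout; illustrative):
 *
 *   # echo read,write > /sys/block/sda/trace/act_mask
 *   # echo 1 > /sys/block/sda/trace/enable
 *   # cat /sys/block/sda/trace/act_mask
 */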
1664 
1665 static const struct {
1666 	int mask;
1667 	const char *str;
1668 } mask_maps[] = {
1669 	{ BLK_TC_READ,		"read"		},
1670 	{ BLK_TC_WRITE,		"write"		},
1671 	{ BLK_TC_FLUSH,		"flush"		},
1672 	{ BLK_TC_SYNC,		"sync"		},
1673 	{ BLK_TC_QUEUE,		"queue"		},
1674 	{ BLK_TC_REQUEUE,	"requeue"	},
1675 	{ BLK_TC_ISSUE,		"issue"		},
1676 	{ BLK_TC_COMPLETE,	"complete"	},
1677 	{ BLK_TC_FS,		"fs"		},
1678 	{ BLK_TC_PC,		"pc"		},
1679 	{ BLK_TC_NOTIFY,	"notify"	},
1680 	{ BLK_TC_AHEAD,		"ahead"		},
1681 	{ BLK_TC_META,		"meta"		},
1682 	{ BLK_TC_DISCARD,	"discard"	},
1683 	{ BLK_TC_DRV_DATA,	"drv_data"	},
1684 	{ BLK_TC_FUA,		"fua"		},
1685 };
1686 
1687 static int blk_trace_str2mask(const char *str)
1688 {
1689 	int i;
1690 	int mask = 0;
1691 	char *buf, *s, *token;
1692 
1693 	buf = kstrdup(str, GFP_KERNEL);
1694 	if (buf == NULL)
1695 		return -ENOMEM;
1696 	s = strstrip(buf);
1697 
1698 	while (1) {
1699 		token = strsep(&s, ",");
1700 		if (token == NULL)
1701 			break;
1702 
1703 		if (*token == '\0')
1704 			continue;
1705 
1706 		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1707 			if (strcasecmp(token, mask_maps[i].str) == 0) {
1708 				mask |= mask_maps[i].mask;
1709 				break;
1710 			}
1711 		}
1712 		if (i == ARRAY_SIZE(mask_maps)) {
1713 			mask = -EINVAL;
1714 			break;
1715 		}
1716 	}
1717 	kfree(buf);
1718 
1719 	return mask;
1720 }
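/*
 * Example (illustrative): blk_trace_str2mask("read,sync") returns
 * BLK_TC_READ | BLK_TC_SYNC, while an unknown token such as "bogus"
 * makes the whole parse fail with -EINVAL.
 */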
1721 
1722 static ssize_t blk_trace_mask2str(char *buf, int mask)
1723 {
1724 	int i;
1725 	char *p = buf;
1726 
1727 	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1728 		if (mask & mask_maps[i].mask) {
1729 			p += sprintf(p, "%s%s",
1730 				    (p == buf) ? "" : ",", mask_maps[i].str);
1731 		}
1732 	}
1733 	*p++ = '\n';
1734 
1735 	return p - buf;
1736 }
1737 
1738 static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1739 {
1740 	if (bdev->bd_disk == NULL)
1741 		return NULL;
1742 
1743 	return bdev_get_queue(bdev);
1744 }
1745 
1746 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1747 					 struct device_attribute *attr,
1748 					 char *buf)
1749 {
1750 	struct hd_struct *p = dev_to_part(dev);
1751 	struct request_queue *q;
1752 	struct block_device *bdev;
1753 	ssize_t ret = -ENXIO;
1754 
1755 	bdev = bdget(part_devt(p));
1756 	if (bdev == NULL)
1757 		goto out;
1758 
1759 	q = blk_trace_get_queue(bdev);
1760 	if (q == NULL)
1761 		goto out_bdput;
1762 
1763 	mutex_lock(&q->blk_trace_mutex);
1764 
1765 	if (attr == &dev_attr_enable) {
1766 		ret = sprintf(buf, "%u\n", !!q->blk_trace);
1767 		goto out_unlock_bdev;
1768 	}
1769 
1770 	if (q->blk_trace == NULL)
1771 		ret = sprintf(buf, "disabled\n");
1772 	else if (attr == &dev_attr_act_mask)
1773 		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
1774 	else if (attr == &dev_attr_pid)
1775 		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
1776 	else if (attr == &dev_attr_start_lba)
1777 		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
1778 	else if (attr == &dev_attr_end_lba)
1779 		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1780 
1781 out_unlock_bdev:
1782 	mutex_unlock(&q->blk_trace_mutex);
1783 out_bdput:
1784 	bdput(bdev);
1785 out:
1786 	return ret;
1787 }
1788 
1789 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1790 					  struct device_attribute *attr,
1791 					  const char *buf, size_t count)
1792 {
1793 	struct block_device *bdev;
1794 	struct request_queue *q;
1795 	struct hd_struct *p;
1796 	u64 value;
1797 	ssize_t ret = -EINVAL;
1798 
1799 	if (count == 0)
1800 		goto out;
1801 
1802 	if (attr == &dev_attr_act_mask) {
1803 		if (kstrtoull(buf, 0, &value)) {
1804 			/* Assume it is a list of trace category names */
1805 			ret = blk_trace_str2mask(buf);
1806 			if (ret < 0)
1807 				goto out;
1808 			value = ret;
1809 		}
1810 	} else if (kstrtoull(buf, 0, &value))
1811 		goto out;
1812 
1813 	ret = -ENXIO;
1814 
1815 	p = dev_to_part(dev);
1816 	bdev = bdget(part_devt(p));
1817 	if (bdev == NULL)
1818 		goto out;
1819 
1820 	q = blk_trace_get_queue(bdev);
1821 	if (q == NULL)
1822 		goto out_bdput;
1823 
1824 	mutex_lock(&q->blk_trace_mutex);
1825 
1826 	if (attr == &dev_attr_enable) {
1827 		if (!!value == !!q->blk_trace) {
1828 			ret = 0;
1829 			goto out_unlock_bdev;
1830 		}
1831 		if (value)
1832 			ret = blk_trace_setup_queue(q, bdev);
1833 		else
1834 			ret = blk_trace_remove_queue(q);
1835 		goto out_unlock_bdev;
1836 	}
1837 
1838 	ret = 0;
1839 	if (q->blk_trace == NULL)
1840 		ret = blk_trace_setup_queue(q, bdev);
1841 
1842 	if (ret == 0) {
1843 		if (attr == &dev_attr_act_mask)
1844 			q->blk_trace->act_mask = value;
1845 		else if (attr == &dev_attr_pid)
1846 			q->blk_trace->pid = value;
1847 		else if (attr == &dev_attr_start_lba)
1848 			q->blk_trace->start_lba = value;
1849 		else if (attr == &dev_attr_end_lba)
1850 			q->blk_trace->end_lba = value;
1851 	}
1852 
1853 out_unlock_bdev:
1854 	mutex_unlock(&q->blk_trace_mutex);
1855 out_bdput:
1856 	bdput(bdev);
1857 out:
1858 	return ret ? ret : count;
1859 }
1860 
1861 int blk_trace_init_sysfs(struct device *dev)
1862 {
1863 	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1864 }
1865 
1866 void blk_trace_remove_sysfs(struct device *dev)
1867 {
1868 	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1869 }
1870 
1871 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1872 
1873 #ifdef CONFIG_EVENT_TRACING
1874 
1875 void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
1876 {
1877 	int i = 0;
1878 
1879 	if (op & REQ_PREFLUSH)
1880 		rwbs[i++] = 'F';
1881 
1882 	switch (op & REQ_OP_MASK) {
1883 	case REQ_OP_WRITE:
1884 	case REQ_OP_WRITE_SAME:
1885 		rwbs[i++] = 'W';
1886 		break;
1887 	case REQ_OP_DISCARD:
1888 		rwbs[i++] = 'D';
1889 		break;
1890 	case REQ_OP_SECURE_ERASE:
1891 		rwbs[i++] = 'D';
1892 		rwbs[i++] = 'E';
1893 		break;
1894 	case REQ_OP_FLUSH:
1895 		rwbs[i++] = 'F';
1896 		break;
1897 	case REQ_OP_READ:
1898 		rwbs[i++] = 'R';
1899 		break;
1900 	default:
1901 		rwbs[i++] = 'N';
1902 	}
1903 
1904 	if (op & REQ_FUA)
1905 		rwbs[i++] = 'F';
1906 	if (op & REQ_RAHEAD)
1907 		rwbs[i++] = 'A';
1908 	if (op & REQ_SYNC)
1909 		rwbs[i++] = 'S';
1910 	if (op & REQ_META)
1911 		rwbs[i++] = 'M';
1912 
1913 	rwbs[i] = '\0';
1914 }
1915 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
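/*
 * Example (illustrative): blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC
 * | REQ_FUA, 4096) fills rwbs with "WFS"; note the @bytes argument is
 * not consulted in this version of the function.
 */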
1916 
1917 #endif /* CONFIG_EVENT_TRACING */
1918 
1919