// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

static void record_blktrace_event(struct blk_io_trace *t, pid_t pid, int cpu,
				  sector_t sector, int bytes, u64 what,
				  dev_t dev, int error, u64 cgid,
				  ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	/*
	 * These two are not needed in ftrace as they are in the
	 * generic trace_entry, filled by tracing_generic_entry_update,
	 * but for the trace_event->bin() synthesizer benefit we do it
	 * here too.
	 */
	t->cpu = cpu;
	t->pid = pid;

	t->sector = sector;
	t->bytes = bytes;
	t->action = lower_32_bits(what);
	t->device = dev;
	t->error = error;
	t->pdu_len = pdu_len + cgid_len;

	if (cgid_len)
		memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
	if (pdu_len)
		memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
}
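
/*
 * Illustrative layout note (a sketch, not kernel ABI documentation):
 * the record written above is the fixed header immediately followed
 * by the optional cgid and PDU payloads:
 *
 *	+---------------------+-----------------+------------------+
 *	| struct blk_io_trace | cgid (8 bytes,  | pdu_data         |
 *	| (fixed header)      | if cgid_len)    | (pdu_len bytes)  |
 *	+---------------------+-----------------+------------------+
 *
 * so a consumer can locate the PDU with:
 *
 *	void *pdu = (void *)(t + 1) + (cgid_len ? sizeof(u64) : 0);
 *
 * which matches what pdu_start() implements below for the ftrace path.
 */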

static void record_blktrace_event2(struct blk_io_trace2 *t2, pid_t pid, int cpu,
				   sector_t sector, int bytes, u64 what,
				   dev_t dev, int error, u64 cgid,
				   ssize_t cgid_len, void *pdu_data,
				   int pdu_len)
{
	t2->pid = pid;
	t2->cpu = cpu;

	t2->sector = sector;
	t2->bytes = bytes;
	t2->action = what;
	t2->device = dev;
	t2->error = error;
	t2->pdu_len = pdu_len + cgid_len;

	if (cgid_len)
		memcpy((void *)t2 + sizeof(*t2), &cgid, cgid_len);
	if (pdu_len)
		memcpy((void *)t2 + sizeof(*t2) + cgid_len, pdu_data, pdu_len);
}

static void relay_blktrace_event1(struct blk_trace *bt, unsigned long sequence,
				  pid_t pid, int cpu, sector_t sector, int bytes,
				  u64 what, int error, u64 cgid,
				  ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	struct blk_io_trace *t;
	size_t trace_len = sizeof(*t) + pdu_len + cgid_len;

	t = relay_reserve(bt->rchan, trace_len);
	if (!t)
		return;

	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	t->sequence = sequence;
	t->time = ktime_to_ns(ktime_get());

	record_blktrace_event(t, pid, cpu, sector, bytes, what, bt->dev, error,
			      cgid, cgid_len, pdu_data, pdu_len);
}

static void relay_blktrace_event2(struct blk_trace *bt, unsigned long sequence,
				  pid_t pid, int cpu, sector_t sector,
				  int bytes, u64 what, int error, u64 cgid,
				  ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	struct blk_io_trace2 *t;
	size_t trace_len = sizeof(struct blk_io_trace2) + pdu_len + cgid_len;

	t = relay_reserve(bt->rchan, trace_len);
	if (!t)
		return;

	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE2_VERSION;
	t->sequence = sequence;
	t->time = ktime_to_ns(ktime_get());

	record_blktrace_event2(t, pid, cpu, sector, bytes, what, bt->dev, error,
			       cgid, cgid_len, pdu_data, pdu_len);
}

static void relay_blktrace_event(struct blk_trace *bt, unsigned long sequence,
				 pid_t pid, int cpu, sector_t sector, int bytes,
				 u64 what, int error, u64 cgid,
				 ssize_t cgid_len, void *pdu_data, int pdu_len)
{
	if (bt->version == 2)
		return relay_blktrace_event2(bt, sequence, pid, cpu, sector,
					     bytes, what, error, cgid, cgid_len,
					     pdu_data, pdu_len);
	return relay_blktrace_event1(bt, sequence, pid, cpu, sector, bytes,
				     what, error, cgid, cgid_len, pdu_data,
				     pdu_len);
}

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, u64 action,
		       const void *data, size_t len, u64 cgid)
{
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	action = lower_32_bits(action | (cgid ? __BLK_TN_CGROUP : 0));
	if (blk_tracer) {
		struct blk_io_trace2 *t;
		size_t trace_len = sizeof(*t) + cgid_len + len;

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  trace_len, trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		record_blktrace_event2(t, pid, cpu, 0, 0,
				       action, bt->dev, 0, cgid, cgid_len,
				       (void *)data, len);
		trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
		return;
	}

	if (!bt->rchan)
		return;

	relay_blktrace_event(bt, 0, pid, cpu, 0, 0, action, 0, cgid,
			     cgid_len, (void *)data, len);
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}
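
/*
 * Illustrative sketch (an assumption about a typical consumer, not a
 * documented ABI contract): userspace decodes the BLK_TN_TIMESTAMP
 * payload as two u32 words, seconds then nanoseconds:
 *
 *	u32 words[2];
 *
 *	memcpy(words, pdu, sizeof(words));
 *	printf("trace started at %u.%09u\n", words[0], words[1]);
 *
 * The seconds value is truncated to 32 bits, hence the y2038/y2106
 * concern noted above.
 */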

void __blk_trace_note_message(struct blk_trace *bt,
			      struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

static int act_log_check(struct blk_trace *bt, u64 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) <<	\
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
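
/*
 * Worked example: with BLK_TC_SYNC == 1 << 3 and BLK_TC_SHIFT == 16,
 * MASK_TC_BIT(opf, SYNC) reduces at compile time to
 *
 *	(opf & REQ_SYNC) << (3 + 16 - __REQ_SYNC)
 *
 * i.e. the REQ_SYNC request flag is moved straight into bit 19, the
 * BLK_TC_SYNC position of the action word's category field, with a
 * single constant shift and no conditionals.
 */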

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u64 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;
	size_t trace_len;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);

	switch (op) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
		break;
	case REQ_OP_FLUSH:
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
		break;
	case REQ_OP_ZONE_APPEND:
		what |= BLK_TC_ACT(BLK_TC_ZONE_APPEND);
		break;
	case REQ_OP_ZONE_RESET:
		what |= BLK_TC_ACT(BLK_TC_ZONE_RESET);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		what |= BLK_TC_ACT(BLK_TC_ZONE_RESET_ALL);
		break;
	case REQ_OP_ZONE_FINISH:
		what |= BLK_TC_ACT(BLK_TC_ZONE_FINISH);
		break;
	case REQ_OP_ZONE_OPEN:
		what |= BLK_TC_ACT(BLK_TC_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		what |= BLK_TC_ACT(BLK_TC_ZONE_CLOSE);
		break;
	case REQ_OP_WRITE_ZEROES:
		what |= BLK_TC_ACT(BLK_TC_WRITE_ZEROES);
		break;
	default:
		break;
	}

	/* Drop trace events for zone operations with blktrace v1 */
	if (bt->version == 1 && (what >> BLK_TC_SHIFT) > BLK_TC_END_V1) {
		pr_debug_ratelimited("blktrace v1 cannot trace zone operation 0x%llx\n",
				     (unsigned long long)what);
		return;
	}

	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		switch (bt->version) {
		case 1:
			trace_len = sizeof(struct blk_io_trace);
			break;
		case 2:
		default:
			/*
			 * ftrace always uses the v2 (blk_io_trace2) format.
			 *
			 * For the sysfs-enabled tracing path (enabled via
			 * /sys/block/DEV/trace/enable), blk_trace_setup_queue()
			 * never initializes bt->version, leaving it 0 from
			 * kzalloc(), so version == 0 must be handled safely
			 * here as well.
			 *
			 * Falling through to the default case ensures we never
			 * hit the old bug where the default left trace_len = 0,
			 * causing buffer underflow and memory corruption.
			 *
			 * Always use the v2 format for ftrace and normalize
			 * bt->version to 2 when it is uninitialized.
			 */
			trace_len = sizeof(struct blk_io_trace2);
			if (bt->version == 0)
				bt->version = 2;
			break;
		}
		trace_len += pdu_len + cgid_len;
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  trace_len, trace_ctx);
		if (!event)
			return;

		switch (bt->version) {
		case 1:
			record_blktrace_event(ring_buffer_event_data(event),
					      pid, cpu, sector, bytes,
					      what, bt->dev, error, cgid, cgid_len,
					      pdu_data, pdu_len);
			break;
		case 2:
		default:
			/*
			 * Use the v2 recording function,
			 * record_blktrace_event2(), which writes a
			 * blk_io_trace2 structure with the correct field
			 * layout:
			 * - 32-bit pid at offset 28
			 * - 64-bit action at offset 32
			 *
			 * Falling through to the default case handles
			 * version == 0 (from the sysfs path), ensuring we
			 * always use the v2 recording function to match the
			 * v2 buffer allocated above.
			 */
			record_blktrace_event2(ring_buffer_event_data(event),
					       pid, cpu, sector, bytes,
					       what, bt->dev, error, cgid, cgid_len,
					       pdu_data, pdu_len);
			break;
		}

		trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
		return;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	sequence = per_cpu_ptr(bt->sequence, cpu);
	(*sequence)++;
	relay_blktrace_event(bt, *sequence, pid, cpu, sector, bytes,
			     what, error, cgid, cgid_len, pdu_data, pdu_len);
	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' were created
	 * under 'q->debugfs_dir', so look them up there and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	size_t dropped = relay_stats(bt->rchan, RELAY_STATS_BUF_FULL);
	char buf[16];

	snprintf(buf, sizeof(buf), "%zu\n", dropped);

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= blk_dropped_read,
	.llseek	= default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= blk_msg_write,
	.llseek	= noop_llseek,
};

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static struct blk_trace *blk_trace_setup_prepare(struct request_queue *q,
						 char *name, dev_t dev,
						 u32 buf_size, u32 buf_nr,
						 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * bdev can be NULL, as with scsi-generic; in that case we are as
	 * helpful as we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n", name);
		return ERR_PTR(-EBUSY);
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partitions and for
	 * scsi-generic block devices, create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n", name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buf_size, buf_nr,
			       &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	blk_trace_setup_lba(bt, bdev);

	return bt;

err:
	blk_trace_free(q, bt);

	return ERR_PTR(ret);
}

static void blk_trace_setup_finalize(struct request_queue *q,
				     char *name, int version,
				     struct blk_trace *bt,
				     struct blk_user_trace_setup2 *buts)
{
	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE2);

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt->version = version;
	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct blk_user_trace_setup buts;
	struct blk_trace *bt;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	if (!buts.buf_size || !buts.buf_nr)
		return -EINVAL;

	buts2 = (struct blk_user_trace_setup2) {
		.act_mask = buts.act_mask,
		.buf_size = buts.buf_size,
		.buf_nr = buts.buf_nr,
		.start_lba = buts.start_lba,
		.end_lba = buts.end_lba,
		.pid = buts.pid,
	};

	mutex_lock(&q->debugfs_mutex);
	bt = blk_trace_setup_prepare(q, name, dev, buts.buf_size, buts.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		mutex_unlock(&q->debugfs_mutex);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 1, bt, &buts2);
	strcpy(buts.name, buts2.name);
	mutex_unlock(&q->debugfs_mutex);

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

static int blk_trace_setup2(struct request_queue *q, char *name, dev_t dev,
			    struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct blk_trace *bt;

	if (copy_from_user(&buts2, arg, sizeof(buts2)))
		return -EFAULT;

	if (!buts2.buf_size || !buts2.buf_nr)
		return -EINVAL;

	if (buts2.flags != 0)
		return -EINVAL;

	mutex_lock(&q->debugfs_mutex);
	bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		mutex_unlock(&q->debugfs_mutex);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 2, bt, &buts2);
	mutex_unlock(&q->debugfs_mutex);

	if (copy_to_user(arg, &buts2, sizeof(buts2))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup2 buts2;
	struct compat_blk_user_trace_setup cbuts;
	struct blk_trace *bt;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	if (!cbuts.buf_size || !cbuts.buf_nr)
		return -EINVAL;

	buts2 = (struct blk_user_trace_setup2) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	mutex_lock(&q->debugfs_mutex);
	bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
				     bdev);
	if (IS_ERR(bt)) {
		mutex_unlock(&q->debugfs_mutex);
		return PTR_ERR(bt);
	}
	blk_trace_setup_finalize(q, name, 1, bt, &buts2);
	mutex_unlock(&q->debugfs_mutex);

	if (copy_to_user(arg, &buts2.name, ARRAY_SIZE(buts2.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	switch (cmd) {
	case BLKTRACESETUP2:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup2(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}
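
/*
 * Minimal userspace sketch of driving these ioctls (illustrative only;
 * error handling omitted, device path hypothetical):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *		.act_mask = 0,	// 0 is normalized to "all actions"
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);   // creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);          // state -> Blktrace_running
 *	// ... read the per-cpu "trace" relay files under debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);       // frees the blk_trace
 */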

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u64 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_zone_update_request(void *ignore, struct request *rq)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt) || bt->version < 2) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ZONE_APPEND,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u64 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			  0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			  0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u64 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_zone_plug(void *ignore, struct request_queue *q,
				    unsigned int zno, sector_t sector,
				    unsigned int sectors)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt && bt->version >= 2)
		__blk_add_trace(bt, sector, sectors << SECTOR_SHIFT, 0,
				BLK_TA_ZONE_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_zone_unplug(void *ignore, struct request_queue *q,
				      unsigned int zno, sector_t sector,
				      unsigned int sectors)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt && bt->version >= 2)
		__blk_add_trace(bt, sector, sectors << SECTOR_SHIFT, 0,
				BLK_TA_ZONE_UNPLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_blk_zone_append_update_request_bio(
				blk_add_trace_zone_update_request, NULL);
	WARN_ON(ret);
	ret = register_trace_disk_zone_wplug_add_bio(blk_add_trace_zone_plug,
						     NULL);
	WARN_ON(ret);
	ret = register_trace_blk_zone_wplug_bio(blk_add_trace_zone_unplug,
						NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_blk_zone_wplug_bio(blk_add_trace_zone_unplug, NULL);
	unregister_trace_disk_zone_wplug_add_bio(blk_add_trace_zone_plug, NULL);
	unregister_trace_blk_zone_append_update_request_bio(
				blk_add_trace_zone_update_request, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace2 *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE_ZEROES) {
		rwbs[i++] = 'W';
		rwbs[i++] = 'Z';
	} else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
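
/*
 * Example: for an entry whose category bits carry BLK_TC_WRITE |
 * BLK_TC_SYNC | BLK_TC_FUA, the function above emits 'W', then 'F'
 * (FUA), then 'S' (sync):
 *
 *	char rwbs[RWBS_LEN];
 *
 *	fill_rwbs(rwbs, t);	// t->action categories: WRITE|SYNC|FUA
 *	// rwbs == "WFS"
 *
 * A zero-byte entry with no category bits decodes to plain "N".
 */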

static inline
const struct blk_io_trace2 *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace2 *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);

	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
				 bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
				   bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs = (unsigned long)ts;
	const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
						   sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN". Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups. See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
					 "%3d,%-3d %llx,%-llx %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}
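
/*
 * Example of the "LOW32,HIGH32" split above (illustrative values): for
 * id == 0x0000000500000003 the line shows "3,5", and userspace can
 * reassemble the kernfs id with
 *
 *	u64 id = low | ((u64)high << 32);
 *
 * before building a FILEID_INO32_GEN fid for open_by_handle_at(2);
 * see __kernfs_fh_to_dentry().
 */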

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {
		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void (*print)(struct trace_seq *s, const struct trace_entry *ent,
		      bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	    blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },   blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },  blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	    blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	    blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	    blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	    blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },    blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	    blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	    blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	    blk_log_split },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	    blk_log_remap },
};
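
/*
 * Example (illustrative): a completed 8-sector read at sector 2048 on
 * device 8,0 indexes what2act[__BLK_TA_COMPLETE], printing act "C"
 * ("complete" in verbose mode) via blk_log_with_error(), so the
 * classic format yields a line like
 *
 *	8,0    1     0.000123456  4123  C   R 2048 + 8 [0]
 */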
1727
print_one_line(struct trace_iterator * iter,bool classic)1728 static enum print_line_t print_one_line(struct trace_iterator *iter,
1729 bool classic)
1730 {
1731 struct trace_array *tr = iter->tr;
1732 struct trace_seq *s = &iter->seq;
1733 const struct blk_io_trace2 *t;
1734 u16 what;
1735 bool long_act;
1736 blk_log_action_t *log_action;
1737 bool has_cg;
1738
1739 t = te_blk_io_trace(iter->ent);
1740 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1741 long_act = !!(tr->trace_flags & TRACE_ITER(VERBOSE));
1742 log_action = classic ? &blk_log_action_classic : &blk_log_action;
1743 has_cg = t->action & __BLK_TA_CGROUP;
1744
1745 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1746 log_action(iter, long_act ? "message" : "m", has_cg);
1747 blk_log_msg(s, iter->ent, has_cg);
1748 return trace_handle_return(s);
1749 }
1750
1751 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1752 trace_seq_printf(s, "Unknown action %x\n", what);
1753 else {
1754 log_action(iter, what2act[what].act[long_act], has_cg);
1755 what2act[what].print(s, iter->ent, has_cg);
1756 }
1757
1758 return trace_handle_return(s);
1759 }

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

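/*
 * Re-emit the on-ring blk_io_trace2 entry in the legacy v1 binary layout:
 * a locally built v1 header supplies the magic and timestamp, and the
 * remaining fields (from ->sector onward, plus the pdu) are copied from
 * the v2 record, so binary-format readers still see v1 records.
 */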
static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace2 *t = (struct blk_io_trace2 *)iter->ent;
	const int offset = offsetof(struct blk_io_trace2, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if ((iter->ent->type != TRACE_BLK) ||
	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER(CONTEXT_INFO);
		else
			tr->trace_flags |= TRACE_ITER(CONTEXT_INFO);
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	BUILD_BUG_ON(__alignof__(struct blk_user_trace_setup2) %
		     __alignof__(long));
	BUILD_BUG_ON(__alignof__(struct blk_io_trace2) % __alignof__(long));

	return 0;
}

device_initcall(init_blk_tracer);

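/*
 * Tear down tracing that was enabled via sysfs: the pointer swap is
 * serialized by q->debugfs_mutex, and synchronize_rcu() ensures no
 * tracepoint probe still dereferences bt before it is freed.
 */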
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	blk_trace_stop(bt);

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(q, bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(q, bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */
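/*
 * The attribute group below is named "trace", so the files appear under
 * /sys/block/<disk>/trace/. Illustrative use, assuming a disk named sda:
 *
 *	echo read,write > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 */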

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
	{ BLK_TC_WRITE_ZEROES,	"write-zeroes"	},
};

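/*
 * Parse a comma-separated list of the category names above into a
 * BLK_TC_* bitmask, e.g. "read,write,sync" (case-insensitive) yields
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC. Returns -EINVAL for an
 * unknown token and -ENOMEM if the temporary copy cannot be allocated.
 */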
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				     (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
					       lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/**
 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
 * @rwbs: buffer to be filled
 * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint
 *
 * Description:
 *     Maps each request operation and flag to a single character and fills the
 *     buffer provided by the caller with resulting string.
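 *
 *     For example (illustrative values): REQ_OP_WRITE | REQ_SYNC | REQ_FUA
 *     produces "WFS", and adding REQ_PREFLUSH would prepend an 'F'.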
 *
 **/
void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
{
	int i = 0;

	if (opf & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (opf & REQ_OP_MASK) {
	case REQ_OP_WRITE:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	case REQ_OP_ZONE_APPEND:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'A';
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'R';
		if ((opf & REQ_OP_MASK) == REQ_OP_ZONE_RESET_ALL)
			rwbs[i++] = 'A';
		break;
	case REQ_OP_ZONE_FINISH:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'F';
		break;
	case REQ_OP_ZONE_OPEN:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'O';
		break;
	case REQ_OP_ZONE_CLOSE:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'C';
		break;
	case REQ_OP_WRITE_ZEROES:
		rwbs[i++] = 'W';
		rwbs[i++] = 'Z';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (opf & REQ_FUA)
		rwbs[i++] = 'F';
	if (opf & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (opf & REQ_SYNC)
		rwbs[i++] = 'S';
	if (opf & REQ_META)
		rwbs[i++] = 'M';
	if (opf & REQ_ATOMIC)
		rwbs[i++] = 'U';

	WARN_ON_ONCE(i >= RWBS_LEN);

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);

#endif /* CONFIG_EVENT_TRACING */