// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
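
/*
 * These tracer options are toggled through tracefs once the "blk" tracer
 * is the current tracer, e.g. (assuming the usual tracefs mount point):
 *
 *	# echo blk > /sys/kernel/debug/tracing/current_tracer
 *	# echo 1 > /sys/kernel/debug/tracing/options/blk_classic
 */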

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
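
/*
 * For instance, MASK_TC_BIT(opf, SYNC) isolates the REQ_SYNC bit in @opf
 * and shifts it from bit __REQ_SYNC up to bit
 * (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT), i.e. to the position that
 * BLK_TC_ACT(BLK_TC_SYNC) occupies in the resulting action mask.
 */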

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
	 * under 'q->debugfs_dir', thus lookup and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
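
/*
 * Starting a trace below bumps the global blktrace_seq; __blk_add_trace()
 * then notices the mismatch with tsk->btrace_seq and emits one
 * BLK_TN_PROCESS note per task and trace run (see trace_note_tsk()).
 */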
static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
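
/*
 * Per-device control files live next to the relay buffers in debugfs
 * (normally /sys/kernel/debug/block/<name>/): "dropped" reports how many
 * events were lost to full subbuffers, and "msg" lets user space inject a
 * free-form message into the trace, e.g. (device name is just an example):
 *
 *	# echo "fio run 1 starts" > /sys/kernel/debug/block/sda/msg
 */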
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
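
/*
 * buts->buf_size and buts->buf_nr below come straight from the
 * BLKTRACESETUP payload and become the relay subbuffer geometry;
 * blktrace(8) defaults to four 512 KiB subbuffers per CPU unless told
 * otherwise via its -b/-n options.
 */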
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE);

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic, this is as helpful as
	 * we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk reuse the existing debugfs directory
	 * created by the block layer on init. For partition block devices
	 * and scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface, the debugfs
	 * directory is required, contrary to the usual mantra of not checking
	 * for debugfs files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(q, bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	mutex_lock(&q->debugfs_mutex);
	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	mutex_unlock(&q->debugfs_mutex);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	mutex_lock(&q->debugfs_mutex);
	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	mutex_unlock(&q->debugfs_mutex);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */
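
/*
 * A minimal user-space sequence driving these ioctls (a sketch, not the
 * blktrace(8) sources; error handling omitted):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	... read the per-cpu "trace<N>" relay files from debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */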

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	switch (cmd) {
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */
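
/*
 * All probes below are attached globally to the block tracepoints (see
 * blk_register_tracepoints()), so each of them must re-check under RCU
 * whether the queue at hand actually has an active blk_trace before
 * doing any work.
 */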

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the request offset
 *     and size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			  0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			  0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}
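
/*
 * PDU payloads such as the unplug depth below are stored big-endian, so
 * the binary record format is host-independent; the output side undoes
 * this in get_pdu_int()/blk_log_unplug().
 */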
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
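
/*
 * Example use of the export above (a sketch; the cookie value is
 * hypothetical): a driver can attach a small binary blob to the trace of
 * the current request:
 *
 *	u32 cookie = hw_tag;
 *	blk_add_driver_data(rq, &cookie, sizeof(cookie));
 */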
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */
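
/*
 * fill_rwbs() below produces the short flag column seen in the output:
 * e.g. a synchronous write shows up as "WS", a discard as "D" and a
 * flush as "F".
 */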
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
				 bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
				   bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
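
/*
 * With the classic format above, a line comes out roughly as:
 *
 *	8,0    3     5.123456789  1234  Q  WS 2384 + 8 [fio]
 *
 * i.e. dev major,minor, cpu, timestamp, pid, action, rwbs, followed by
 * the per-action payload printed by the blk_log_*() helpers below
 * (the sample values here are made up).
 */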
static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
						   sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN". Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups. See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
					 "%3d,%-3d %llx,%-llx %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {
		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}
"message" : "m", has_cg); 1479 blk_log_msg(s, iter->ent, has_cg); 1480 return trace_handle_return(s); 1481 } 1482 1483 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) 1484 trace_seq_printf(s, "Unknown action %x\n", what); 1485 else { 1486 log_action(iter, what2act[what].act[long_act], has_cg); 1487 what2act[what].print(s, iter->ent, has_cg); 1488 } 1489 1490 return trace_handle_return(s); 1491 } 1492 1493 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, 1494 int flags, struct trace_event *event) 1495 { 1496 return print_one_line(iter, false); 1497 } 1498 1499 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) 1500 { 1501 struct trace_seq *s = &iter->seq; 1502 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; 1503 const int offset = offsetof(struct blk_io_trace, sector); 1504 struct blk_io_trace old = { 1505 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, 1506 .time = iter->ts, 1507 }; 1508 1509 trace_seq_putmem(s, &old, offset); 1510 trace_seq_putmem(s, &t->sector, 1511 sizeof(old) - offset + t->pdu_len); 1512 } 1513 1514 static enum print_line_t 1515 blk_trace_event_print_binary(struct trace_iterator *iter, int flags, 1516 struct trace_event *event) 1517 { 1518 blk_trace_synthesize_old_trace(iter); 1519 1520 return trace_handle_return(&iter->seq); 1521 } 1522 1523 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) 1524 { 1525 if ((iter->ent->type != TRACE_BLK) || 1526 !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1527 return TRACE_TYPE_UNHANDLED; 1528 1529 return print_one_line(iter, true); 1530 } 1531 1532 static int 1533 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 1534 { 1535 /* don't output context-info for blk_classic output */ 1536 if (bit == TRACE_BLK_OPT_CLASSIC) { 1537 if (set) 1538 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1539 else 1540 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; 1541 } 1542 return 0; 1543 } 1544 1545 static struct tracer blk_tracer __read_mostly = { 1546 .name = "blk", 1547 .init = blk_tracer_init, 1548 .reset = blk_tracer_reset, 1549 .start = blk_tracer_start, 1550 .stop = blk_tracer_stop, 1551 .print_header = blk_tracer_print_header, 1552 .print_line = blk_tracer_print_line, 1553 .flags = &blk_tracer_flags, 1554 .set_flag = blk_tracer_set_flag, 1555 }; 1556 1557 static struct trace_event_functions trace_blk_event_funcs = { 1558 .trace = blk_trace_event_print, 1559 .binary = blk_trace_event_print_binary, 1560 }; 1561 1562 static struct trace_event trace_blk_event = { 1563 .type = TRACE_BLK, 1564 .funcs = &trace_blk_event_funcs, 1565 }; 1566 1567 static int __init init_blk_tracer(void) 1568 { 1569 if (!register_trace_event(&trace_blk_event)) { 1570 pr_warn("Warning: could not register block events\n"); 1571 return 1; 1572 } 1573 1574 if (register_tracer(&blk_tracer) != 0) { 1575 pr_warn("Warning: could not register the block tracer\n"); 1576 unregister_trace_event(&trace_blk_event); 1577 return 1; 1578 } 1579 1580 return 0; 1581 } 1582 1583 device_initcall(init_blk_tracer); 1584 1585 static int blk_trace_remove_queue(struct request_queue *q) 1586 { 1587 struct blk_trace *bt; 1588 1589 bt = rcu_replace_pointer(q->blk_trace, NULL, 1590 lockdep_is_held(&q->debugfs_mutex)); 1591 if (bt == NULL) 1592 return -EINVAL; 1593 1594 blk_trace_stop(bt); 1595 1596 put_probe_ref(); 1597 synchronize_rcu(); 1598 blk_trace_free(q, bt); 1599 return 0; 1600 } 1601 1602 /* 1603 * Setup everything required to start tracing 1604 */ 1605 static int 
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(q, bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
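
/*
 * The group above surfaces as /sys/block/<disk>/trace/{enable,act_mask,
 * pid,start_lba,end_lba}; e.g. (with "sda" as an example disk):
 *
 *	# echo "read,write,sync" > /sys/block/sda/trace/act_mask
 *	# echo 1 > /sys/block/sda/trace/enable
 */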
"" : ",", mask_maps[i].str); 1734 } 1735 } 1736 *p++ = '\n'; 1737 1738 return p - buf; 1739 } 1740 1741 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1742 struct device_attribute *attr, 1743 char *buf) 1744 { 1745 struct block_device *bdev = dev_to_bdev(dev); 1746 struct request_queue *q = bdev_get_queue(bdev); 1747 struct blk_trace *bt; 1748 ssize_t ret = -ENXIO; 1749 1750 mutex_lock(&q->debugfs_mutex); 1751 1752 bt = rcu_dereference_protected(q->blk_trace, 1753 lockdep_is_held(&q->debugfs_mutex)); 1754 if (attr == &dev_attr_enable) { 1755 ret = sprintf(buf, "%u\n", !!bt); 1756 goto out_unlock_bdev; 1757 } 1758 1759 if (bt == NULL) 1760 ret = sprintf(buf, "disabled\n"); 1761 else if (attr == &dev_attr_act_mask) 1762 ret = blk_trace_mask2str(buf, bt->act_mask); 1763 else if (attr == &dev_attr_pid) 1764 ret = sprintf(buf, "%u\n", bt->pid); 1765 else if (attr == &dev_attr_start_lba) 1766 ret = sprintf(buf, "%llu\n", bt->start_lba); 1767 else if (attr == &dev_attr_end_lba) 1768 ret = sprintf(buf, "%llu\n", bt->end_lba); 1769 1770 out_unlock_bdev: 1771 mutex_unlock(&q->debugfs_mutex); 1772 return ret; 1773 } 1774 1775 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1776 struct device_attribute *attr, 1777 const char *buf, size_t count) 1778 { 1779 struct block_device *bdev = dev_to_bdev(dev); 1780 struct request_queue *q = bdev_get_queue(bdev); 1781 struct blk_trace *bt; 1782 u64 value; 1783 ssize_t ret = -EINVAL; 1784 1785 if (count == 0) 1786 goto out; 1787 1788 if (attr == &dev_attr_act_mask) { 1789 if (kstrtoull(buf, 0, &value)) { 1790 /* Assume it is a list of trace category names */ 1791 ret = blk_trace_str2mask(buf); 1792 if (ret < 0) 1793 goto out; 1794 value = ret; 1795 } 1796 } else { 1797 if (kstrtoull(buf, 0, &value)) 1798 goto out; 1799 } 1800 1801 mutex_lock(&q->debugfs_mutex); 1802 1803 bt = rcu_dereference_protected(q->blk_trace, 1804 lockdep_is_held(&q->debugfs_mutex)); 1805 if (attr == &dev_attr_enable) { 1806 if (!!value == !!bt) { 1807 ret = 0; 1808 goto out_unlock_bdev; 1809 } 1810 if (value) 1811 ret = blk_trace_setup_queue(q, bdev); 1812 else 1813 ret = blk_trace_remove_queue(q); 1814 goto out_unlock_bdev; 1815 } 1816 1817 ret = 0; 1818 if (bt == NULL) { 1819 ret = blk_trace_setup_queue(q, bdev); 1820 bt = rcu_dereference_protected(q->blk_trace, 1821 lockdep_is_held(&q->debugfs_mutex)); 1822 } 1823 1824 if (ret == 0) { 1825 if (attr == &dev_attr_act_mask) 1826 bt->act_mask = value; 1827 else if (attr == &dev_attr_pid) 1828 bt->pid = value; 1829 else if (attr == &dev_attr_start_lba) 1830 bt->start_lba = value; 1831 else if (attr == &dev_attr_end_lba) 1832 bt->end_lba = value; 1833 } 1834 1835 out_unlock_bdev: 1836 mutex_unlock(&q->debugfs_mutex); 1837 out: 1838 return ret ? ret : count; 1839 } 1840 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 1841 1842 #ifdef CONFIG_EVENT_TRACING 1843 1844 /** 1845 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string. 1846 * @rwbs: buffer to be filled 1847 * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint 1848 * 1849 * Description: 1850 * Maps each request operation and flag to a single character and fills the 1851 * buffer provided by the caller with resulting string. 

#endif /* CONFIG_EVENT_TRACING */