1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> 4 * 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/kernel.h> 10 #include <linux/blkdev.h> 11 #include <linux/blktrace_api.h> 12 #include <linux/percpu.h> 13 #include <linux/init.h> 14 #include <linux/mutex.h> 15 #include <linux/slab.h> 16 #include <linux/debugfs.h> 17 #include <linux/export.h> 18 #include <linux/time.h> 19 #include <linux/uaccess.h> 20 #include <linux/list.h> 21 #include <linux/blk-cgroup.h> 22 23 #include "../../block/blk.h" 24 25 #include <trace/events/block.h> 26 27 #include "trace_output.h" 28 29 #ifdef CONFIG_BLK_DEV_IO_TRACE 30 31 static unsigned int blktrace_seq __read_mostly = 1; 32 33 static struct trace_array *blk_tr; 34 static bool blk_tracer_enabled __read_mostly; 35 36 static LIST_HEAD(running_trace_list); 37 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock); 38 39 /* Select an alternative, minimalistic output than the original one */ 40 #define TRACE_BLK_OPT_CLASSIC 0x1 41 #define TRACE_BLK_OPT_CGROUP 0x2 42 #define TRACE_BLK_OPT_CGNAME 0x4 43 44 static struct tracer_opt blk_tracer_opts[] = { 45 /* Default disable the minimalistic output */ 46 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, 47 #ifdef CONFIG_BLK_CGROUP 48 { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) }, 49 { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) }, 50 #endif 51 { } 52 }; 53 54 static struct tracer_flags blk_tracer_flags = { 55 .val = 0, 56 .opts = blk_tracer_opts, 57 }; 58 59 /* Global reference count of probes */ 60 static DEFINE_MUTEX(blk_probe_mutex); 61 static int blk_probes_ref; 62 63 static void blk_register_tracepoints(void); 64 static void blk_unregister_tracepoints(void); 65 66 /* 67 * Send out a notify message. 68 */ 69 static void trace_note(struct blk_trace *bt, pid_t pid, int action, 70 const void *data, size_t len, u64 cgid) 71 { 72 struct blk_io_trace *t; 73 struct ring_buffer_event *event = NULL; 74 struct trace_buffer *buffer = NULL; 75 unsigned int trace_ctx = 0; 76 int cpu = smp_processor_id(); 77 bool blk_tracer = blk_tracer_enabled; 78 ssize_t cgid_len = cgid ? sizeof(cgid) : 0; 79 80 if (blk_tracer) { 81 buffer = blk_tr->array_buffer.buffer; 82 trace_ctx = tracing_gen_ctx_flags(0); 83 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 84 sizeof(*t) + len + cgid_len, 85 trace_ctx); 86 if (!event) 87 return; 88 t = ring_buffer_event_data(event); 89 goto record_it; 90 } 91 92 if (!bt->rchan) 93 return; 94 95 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); 96 if (t) { 97 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 98 t->time = ktime_to_ns(ktime_get()); 99 record_it: 100 t->device = bt->dev; 101 t->action = action | (cgid ? __BLK_TN_CGROUP : 0); 102 t->pid = pid; 103 t->cpu = cpu; 104 t->pdu_len = len + cgid_len; 105 if (cgid_len) 106 memcpy((void *)t + sizeof(*t), &cgid, cgid_len); 107 memcpy((void *) t + sizeof(*t) + cgid_len, data, len); 108 109 if (blk_tracer) 110 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx); 111 } 112 } 113 114 /* 115 * Send out a notify for this process, if we haven't done so since a trace 116 * started 117 */ 118 static void trace_note_tsk(struct task_struct *tsk) 119 { 120 unsigned long flags; 121 struct blk_trace *bt; 122 123 tsk->btrace_seq = blktrace_seq; 124 raw_spin_lock_irqsave(&running_trace_lock, flags); 125 list_for_each_entry(bt, &running_trace_list, running_list) { 126 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, 127 sizeof(tsk->comm), 0); 128 } 129 raw_spin_unlock_irqrestore(&running_trace_lock, flags); 130 } 131 132 static void trace_note_time(struct blk_trace *bt) 133 { 134 struct timespec64 now; 135 unsigned long flags; 136 u32 words[2]; 137 138 /* need to check user space to see if this breaks in y2038 or y2106 */ 139 ktime_get_real_ts64(&now); 140 words[0] = (u32)now.tv_sec; 141 words[1] = now.tv_nsec; 142 143 local_irq_save(flags); 144 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0); 145 local_irq_restore(flags); 146 } 147 148 void __blk_trace_note_message(struct blk_trace *bt, 149 struct cgroup_subsys_state *css, const char *fmt, ...) 150 { 151 int n; 152 va_list args; 153 unsigned long flags; 154 char *buf; 155 u64 cgid = 0; 156 157 if (unlikely(bt->trace_state != Blktrace_running && 158 !blk_tracer_enabled)) 159 return; 160 161 /* 162 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note 163 * message to the trace. 164 */ 165 if (!(bt->act_mask & BLK_TC_NOTIFY)) 166 return; 167 168 local_irq_save(flags); 169 buf = this_cpu_ptr(bt->msg_data); 170 va_start(args, fmt); 171 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); 172 va_end(args); 173 174 #ifdef CONFIG_BLK_CGROUP 175 if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 176 cgid = cgroup_id(css->cgroup); 177 else 178 cgid = 1; 179 #endif 180 trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid); 181 local_irq_restore(flags); 182 } 183 EXPORT_SYMBOL_GPL(__blk_trace_note_message); 184 185 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, 186 pid_t pid) 187 { 188 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) 189 return 1; 190 if (sector && (sector < bt->start_lba || sector > bt->end_lba)) 191 return 1; 192 if (bt->pid && pid != bt->pid) 193 return 1; 194 195 return 0; 196 } 197 198 /* 199 * Data direction bit lookup 200 */ 201 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 202 BLK_TC_ACT(BLK_TC_WRITE) }; 203 204 #define BLK_TC_RAHEAD BLK_TC_AHEAD 205 #define BLK_TC_PREFLUSH BLK_TC_FLUSH 206 207 /* The ilog2() calls fall out because they're constant */ 208 #define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \ 209 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) 210 211 /* 212 * The worker for the various blk_add_trace*() types. Fills out a 213 * blk_io_trace structure and places it in a per-cpu subbuffer. 214 */ 215 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, 216 const blk_opf_t opf, u32 what, int error, 217 int pdu_len, void *pdu_data, u64 cgid) 218 { 219 struct task_struct *tsk = current; 220 struct ring_buffer_event *event = NULL; 221 struct trace_buffer *buffer = NULL; 222 struct blk_io_trace *t; 223 unsigned long flags = 0; 224 unsigned long *sequence; 225 unsigned int trace_ctx = 0; 226 pid_t pid; 227 int cpu; 228 bool blk_tracer = blk_tracer_enabled; 229 ssize_t cgid_len = cgid ? sizeof(cgid) : 0; 230 const enum req_op op = opf & REQ_OP_MASK; 231 232 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) 233 return; 234 235 what |= ddir_act[op_is_write(op) ? WRITE : READ]; 236 what |= MASK_TC_BIT(opf, SYNC); 237 what |= MASK_TC_BIT(opf, RAHEAD); 238 what |= MASK_TC_BIT(opf, META); 239 what |= MASK_TC_BIT(opf, PREFLUSH); 240 what |= MASK_TC_BIT(opf, FUA); 241 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) 242 what |= BLK_TC_ACT(BLK_TC_DISCARD); 243 if (op == REQ_OP_FLUSH) 244 what |= BLK_TC_ACT(BLK_TC_FLUSH); 245 if (cgid) 246 what |= __BLK_TA_CGROUP; 247 248 pid = tsk->pid; 249 if (act_log_check(bt, what, sector, pid)) 250 return; 251 cpu = raw_smp_processor_id(); 252 253 if (blk_tracer) { 254 tracing_record_cmdline(current); 255 256 buffer = blk_tr->array_buffer.buffer; 257 trace_ctx = tracing_gen_ctx_flags(0); 258 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 259 sizeof(*t) + pdu_len + cgid_len, 260 trace_ctx); 261 if (!event) 262 return; 263 t = ring_buffer_event_data(event); 264 goto record_it; 265 } 266 267 if (unlikely(tsk->btrace_seq != blktrace_seq)) 268 trace_note_tsk(tsk); 269 270 /* 271 * A word about the locking here - we disable interrupts to reserve 272 * some space in the relay per-cpu buffer, to prevent an irq 273 * from coming in and stepping on our toes. 274 */ 275 local_irq_save(flags); 276 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len); 277 if (t) { 278 sequence = per_cpu_ptr(bt->sequence, cpu); 279 280 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 281 t->sequence = ++(*sequence); 282 t->time = ktime_to_ns(ktime_get()); 283 record_it: 284 /* 285 * These two are not needed in ftrace as they are in the 286 * generic trace_entry, filled by tracing_generic_entry_update, 287 * but for the trace_event->bin() synthesizer benefit we do it 288 * here too. 289 */ 290 t->cpu = cpu; 291 t->pid = pid; 292 293 t->sector = sector; 294 t->bytes = bytes; 295 t->action = what; 296 t->device = bt->dev; 297 t->error = error; 298 t->pdu_len = pdu_len + cgid_len; 299 300 if (cgid_len) 301 memcpy((void *)t + sizeof(*t), &cgid, cgid_len); 302 if (pdu_len) 303 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len); 304 305 if (blk_tracer) { 306 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx); 307 return; 308 } 309 } 310 311 local_irq_restore(flags); 312 } 313 314 static void blk_trace_free(struct request_queue *q, struct blk_trace *bt) 315 { 316 relay_close(bt->rchan); 317 318 /* 319 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created 320 * under 'q->debugfs_dir', thus lookup and remove them. 321 */ 322 if (!bt->dir) { 323 debugfs_lookup_and_remove("dropped", q->debugfs_dir); 324 debugfs_lookup_and_remove("msg", q->debugfs_dir); 325 } else { 326 debugfs_remove(bt->dir); 327 } 328 free_percpu(bt->sequence); 329 free_percpu(bt->msg_data); 330 kfree(bt); 331 } 332 333 static void get_probe_ref(void) 334 { 335 mutex_lock(&blk_probe_mutex); 336 if (++blk_probes_ref == 1) 337 blk_register_tracepoints(); 338 mutex_unlock(&blk_probe_mutex); 339 } 340 341 static void put_probe_ref(void) 342 { 343 mutex_lock(&blk_probe_mutex); 344 if (!--blk_probes_ref) 345 blk_unregister_tracepoints(); 346 mutex_unlock(&blk_probe_mutex); 347 } 348 349 static int blk_trace_start(struct blk_trace *bt) 350 { 351 if (bt->trace_state != Blktrace_setup && 352 bt->trace_state != Blktrace_stopped) 353 return -EINVAL; 354 355 blktrace_seq++; 356 smp_mb(); 357 bt->trace_state = Blktrace_running; 358 raw_spin_lock_irq(&running_trace_lock); 359 list_add(&bt->running_list, &running_trace_list); 360 raw_spin_unlock_irq(&running_trace_lock); 361 trace_note_time(bt); 362 363 return 0; 364 } 365 366 static int blk_trace_stop(struct blk_trace *bt) 367 { 368 if (bt->trace_state != Blktrace_running) 369 return -EINVAL; 370 371 bt->trace_state = Blktrace_stopped; 372 raw_spin_lock_irq(&running_trace_lock); 373 list_del_init(&bt->running_list); 374 raw_spin_unlock_irq(&running_trace_lock); 375 relay_flush(bt->rchan); 376 377 return 0; 378 } 379 380 static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt) 381 { 382 blk_trace_stop(bt); 383 synchronize_rcu(); 384 blk_trace_free(q, bt); 385 put_probe_ref(); 386 } 387 388 static int __blk_trace_remove(struct request_queue *q) 389 { 390 struct blk_trace *bt; 391 392 bt = rcu_replace_pointer(q->blk_trace, NULL, 393 lockdep_is_held(&q->debugfs_mutex)); 394 if (!bt) 395 return -EINVAL; 396 397 blk_trace_cleanup(q, bt); 398 399 return 0; 400 } 401 402 int blk_trace_remove(struct request_queue *q) 403 { 404 int ret; 405 406 mutex_lock(&q->debugfs_mutex); 407 ret = __blk_trace_remove(q); 408 mutex_unlock(&q->debugfs_mutex); 409 410 return ret; 411 } 412 EXPORT_SYMBOL_GPL(blk_trace_remove); 413 414 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, 415 size_t count, loff_t *ppos) 416 { 417 struct blk_trace *bt = filp->private_data; 418 char buf[16]; 419 420 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); 421 422 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); 423 } 424 425 static const struct file_operations blk_dropped_fops = { 426 .owner = THIS_MODULE, 427 .open = simple_open, 428 .read = blk_dropped_read, 429 .llseek = default_llseek, 430 }; 431 432 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, 433 size_t count, loff_t *ppos) 434 { 435 char *msg; 436 struct blk_trace *bt; 437 438 if (count >= BLK_TN_MAX_MSG) 439 return -EINVAL; 440 441 msg = memdup_user_nul(buffer, count); 442 if (IS_ERR(msg)) 443 return PTR_ERR(msg); 444 445 bt = filp->private_data; 446 __blk_trace_note_message(bt, NULL, "%s", msg); 447 kfree(msg); 448 449 return count; 450 } 451 452 static const struct file_operations blk_msg_fops = { 453 .owner = THIS_MODULE, 454 .open = simple_open, 455 .write = blk_msg_write, 456 .llseek = noop_llseek, 457 }; 458 459 /* 460 * Keep track of how many times we encountered a full subbuffer, to aid 461 * the user space app in telling how many lost events there were. 462 */ 463 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, 464 void *prev_subbuf, size_t prev_padding) 465 { 466 struct blk_trace *bt; 467 468 if (!relay_buf_full(buf)) 469 return 1; 470 471 bt = buf->chan->private_data; 472 atomic_inc(&bt->dropped); 473 return 0; 474 } 475 476 static int blk_remove_buf_file_callback(struct dentry *dentry) 477 { 478 debugfs_remove(dentry); 479 480 return 0; 481 } 482 483 static struct dentry *blk_create_buf_file_callback(const char *filename, 484 struct dentry *parent, 485 umode_t mode, 486 struct rchan_buf *buf, 487 int *is_global) 488 { 489 return debugfs_create_file(filename, mode, parent, buf, 490 &relay_file_operations); 491 } 492 493 static const struct rchan_callbacks blk_relay_callbacks = { 494 .subbuf_start = blk_subbuf_start_callback, 495 .create_buf_file = blk_create_buf_file_callback, 496 .remove_buf_file = blk_remove_buf_file_callback, 497 }; 498 499 static void blk_trace_setup_lba(struct blk_trace *bt, 500 struct block_device *bdev) 501 { 502 if (bdev) { 503 bt->start_lba = bdev->bd_start_sect; 504 bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev); 505 } else { 506 bt->start_lba = 0; 507 bt->end_lba = -1ULL; 508 } 509 } 510 511 /* 512 * Setup everything required to start tracing 513 */ 514 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 515 struct block_device *bdev, 516 struct blk_user_trace_setup *buts) 517 { 518 struct blk_trace *bt = NULL; 519 struct dentry *dir = NULL; 520 int ret; 521 522 lockdep_assert_held(&q->debugfs_mutex); 523 524 if (!buts->buf_size || !buts->buf_nr) 525 return -EINVAL; 526 527 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); 528 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; 529 530 /* 531 * some device names have larger paths - convert the slashes 532 * to underscores for this to work as expected 533 */ 534 strreplace(buts->name, '/', '_'); 535 536 /* 537 * bdev can be NULL, as with scsi-generic, this is a helpful as 538 * we can be. 539 */ 540 if (rcu_dereference_protected(q->blk_trace, 541 lockdep_is_held(&q->debugfs_mutex))) { 542 pr_warn("Concurrent blktraces are not allowed on %s\n", 543 buts->name); 544 return -EBUSY; 545 } 546 547 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 548 if (!bt) 549 return -ENOMEM; 550 551 ret = -ENOMEM; 552 bt->sequence = alloc_percpu(unsigned long); 553 if (!bt->sequence) 554 goto err; 555 556 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 557 if (!bt->msg_data) 558 goto err; 559 560 /* 561 * When tracing the whole disk reuse the existing debugfs directory 562 * created by the block layer on init. For partitions block devices, 563 * and scsi-generic block devices we create a temporary new debugfs 564 * directory that will be removed once the trace ends. 565 */ 566 if (bdev && !bdev_is_partition(bdev)) 567 dir = q->debugfs_dir; 568 else 569 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); 570 571 /* 572 * As blktrace relies on debugfs for its interface the debugfs directory 573 * is required, contrary to the usual mantra of not checking for debugfs 574 * files or directories. 575 */ 576 if (IS_ERR_OR_NULL(dir)) { 577 pr_warn("debugfs_dir not present for %s so skipping\n", 578 buts->name); 579 ret = -ENOENT; 580 goto err; 581 } 582 583 bt->dev = dev; 584 atomic_set(&bt->dropped, 0); 585 INIT_LIST_HEAD(&bt->running_list); 586 587 ret = -EIO; 588 debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops); 589 debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); 590 591 bt->rchan = relay_open("trace", dir, buts->buf_size, 592 buts->buf_nr, &blk_relay_callbacks, bt); 593 if (!bt->rchan) 594 goto err; 595 596 bt->act_mask = buts->act_mask; 597 if (!bt->act_mask) 598 bt->act_mask = (u16) -1; 599 600 blk_trace_setup_lba(bt, bdev); 601 602 /* overwrite with user settings */ 603 if (buts->start_lba) 604 bt->start_lba = buts->start_lba; 605 if (buts->end_lba) 606 bt->end_lba = buts->end_lba; 607 608 bt->pid = buts->pid; 609 bt->trace_state = Blktrace_setup; 610 611 rcu_assign_pointer(q->blk_trace, bt); 612 get_probe_ref(); 613 614 ret = 0; 615 err: 616 if (ret) 617 blk_trace_free(q, bt); 618 return ret; 619 } 620 621 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 622 struct block_device *bdev, char __user *arg) 623 { 624 struct blk_user_trace_setup buts; 625 int ret; 626 627 ret = copy_from_user(&buts, arg, sizeof(buts)); 628 if (ret) 629 return -EFAULT; 630 631 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 632 if (ret) 633 return ret; 634 635 if (copy_to_user(arg, &buts, sizeof(buts))) { 636 __blk_trace_remove(q); 637 return -EFAULT; 638 } 639 return 0; 640 } 641 642 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 643 struct block_device *bdev, 644 char __user *arg) 645 { 646 int ret; 647 648 mutex_lock(&q->debugfs_mutex); 649 ret = __blk_trace_setup(q, name, dev, bdev, arg); 650 mutex_unlock(&q->debugfs_mutex); 651 652 return ret; 653 } 654 EXPORT_SYMBOL_GPL(blk_trace_setup); 655 656 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 657 static int compat_blk_trace_setup(struct request_queue *q, char *name, 658 dev_t dev, struct block_device *bdev, 659 char __user *arg) 660 { 661 struct blk_user_trace_setup buts; 662 struct compat_blk_user_trace_setup cbuts; 663 int ret; 664 665 if (copy_from_user(&cbuts, arg, sizeof(cbuts))) 666 return -EFAULT; 667 668 buts = (struct blk_user_trace_setup) { 669 .act_mask = cbuts.act_mask, 670 .buf_size = cbuts.buf_size, 671 .buf_nr = cbuts.buf_nr, 672 .start_lba = cbuts.start_lba, 673 .end_lba = cbuts.end_lba, 674 .pid = cbuts.pid, 675 }; 676 677 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 678 if (ret) 679 return ret; 680 681 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { 682 __blk_trace_remove(q); 683 return -EFAULT; 684 } 685 686 return 0; 687 } 688 #endif 689 690 static int __blk_trace_startstop(struct request_queue *q, int start) 691 { 692 struct blk_trace *bt; 693 694 bt = rcu_dereference_protected(q->blk_trace, 695 lockdep_is_held(&q->debugfs_mutex)); 696 if (bt == NULL) 697 return -EINVAL; 698 699 if (start) 700 return blk_trace_start(bt); 701 else 702 return blk_trace_stop(bt); 703 } 704 705 int blk_trace_startstop(struct request_queue *q, int start) 706 { 707 int ret; 708 709 mutex_lock(&q->debugfs_mutex); 710 ret = __blk_trace_startstop(q, start); 711 mutex_unlock(&q->debugfs_mutex); 712 713 return ret; 714 } 715 EXPORT_SYMBOL_GPL(blk_trace_startstop); 716 717 /* 718 * When reading or writing the blktrace sysfs files, the references to the 719 * opened sysfs or device files should prevent the underlying block device 720 * from being removed. So no further delete protection is really needed. 721 */ 722 723 /** 724 * blk_trace_ioctl - handle the ioctls associated with tracing 725 * @bdev: the block device 726 * @cmd: the ioctl cmd 727 * @arg: the argument data, if any 728 * 729 **/ 730 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) 731 { 732 struct request_queue *q = bdev_get_queue(bdev); 733 int ret, start = 0; 734 char b[BDEVNAME_SIZE]; 735 736 mutex_lock(&q->debugfs_mutex); 737 738 switch (cmd) { 739 case BLKTRACESETUP: 740 snprintf(b, sizeof(b), "%pg", bdev); 741 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 742 break; 743 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 744 case BLKTRACESETUP32: 745 snprintf(b, sizeof(b), "%pg", bdev); 746 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 747 break; 748 #endif 749 case BLKTRACESTART: 750 start = 1; 751 fallthrough; 752 case BLKTRACESTOP: 753 ret = __blk_trace_startstop(q, start); 754 break; 755 case BLKTRACETEARDOWN: 756 ret = __blk_trace_remove(q); 757 break; 758 default: 759 ret = -ENOTTY; 760 break; 761 } 762 763 mutex_unlock(&q->debugfs_mutex); 764 return ret; 765 } 766 767 /** 768 * blk_trace_shutdown - stop and cleanup trace structures 769 * @q: the request queue associated with the device 770 * 771 **/ 772 void blk_trace_shutdown(struct request_queue *q) 773 { 774 if (rcu_dereference_protected(q->blk_trace, 775 lockdep_is_held(&q->debugfs_mutex))) 776 __blk_trace_remove(q); 777 } 778 779 #ifdef CONFIG_BLK_CGROUP 780 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 781 { 782 struct cgroup_subsys_state *blkcg_css; 783 struct blk_trace *bt; 784 785 /* We don't use the 'bt' value here except as an optimization... */ 786 bt = rcu_dereference_protected(q->blk_trace, 1); 787 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 788 return 0; 789 790 blkcg_css = bio_blkcg_css(bio); 791 if (!blkcg_css) 792 return 0; 793 return cgroup_id(blkcg_css->cgroup); 794 } 795 #else 796 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 797 { 798 return 0; 799 } 800 #endif 801 802 static u64 803 blk_trace_request_get_cgid(struct request *rq) 804 { 805 if (!rq->bio) 806 return 0; 807 /* Use the first bio */ 808 return blk_trace_bio_get_cgid(rq->q, rq->bio); 809 } 810 811 /* 812 * blktrace probes 813 */ 814 815 /** 816 * blk_add_trace_rq - Add a trace for a request oriented action 817 * @rq: the source request 818 * @error: return status to log 819 * @nr_bytes: number of completed bytes 820 * @what: the action 821 * @cgid: the cgroup info 822 * 823 * Description: 824 * Records an action against a request. Will log the bio offset + size. 825 * 826 **/ 827 static void blk_add_trace_rq(struct request *rq, blk_status_t error, 828 unsigned int nr_bytes, u32 what, u64 cgid) 829 { 830 struct blk_trace *bt; 831 832 rcu_read_lock(); 833 bt = rcu_dereference(rq->q->blk_trace); 834 if (likely(!bt)) { 835 rcu_read_unlock(); 836 return; 837 } 838 839 if (blk_rq_is_passthrough(rq)) 840 what |= BLK_TC_ACT(BLK_TC_PC); 841 else 842 what |= BLK_TC_ACT(BLK_TC_FS); 843 844 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags, 845 what, blk_status_to_errno(error), 0, NULL, cgid); 846 rcu_read_unlock(); 847 } 848 849 static void blk_add_trace_rq_insert(void *ignore, struct request *rq) 850 { 851 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, 852 blk_trace_request_get_cgid(rq)); 853 } 854 855 static void blk_add_trace_rq_issue(void *ignore, struct request *rq) 856 { 857 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, 858 blk_trace_request_get_cgid(rq)); 859 } 860 861 static void blk_add_trace_rq_merge(void *ignore, struct request *rq) 862 { 863 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, 864 blk_trace_request_get_cgid(rq)); 865 } 866 867 static void blk_add_trace_rq_requeue(void *ignore, struct request *rq) 868 { 869 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, 870 blk_trace_request_get_cgid(rq)); 871 } 872 873 static void blk_add_trace_rq_complete(void *ignore, struct request *rq, 874 blk_status_t error, unsigned int nr_bytes) 875 { 876 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, 877 blk_trace_request_get_cgid(rq)); 878 } 879 880 /** 881 * blk_add_trace_bio - Add a trace for a bio oriented action 882 * @q: queue the io is for 883 * @bio: the source bio 884 * @what: the action 885 * @error: error, if any 886 * 887 * Description: 888 * Records an action against a bio. Will log the bio offset + size. 889 * 890 **/ 891 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, 892 u32 what, int error) 893 { 894 struct blk_trace *bt; 895 896 rcu_read_lock(); 897 bt = rcu_dereference(q->blk_trace); 898 if (likely(!bt)) { 899 rcu_read_unlock(); 900 return; 901 } 902 903 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 904 bio->bi_opf, what, error, 0, NULL, 905 blk_trace_bio_get_cgid(q, bio)); 906 rcu_read_unlock(); 907 } 908 909 static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio) 910 { 911 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0); 912 } 913 914 static void blk_add_trace_bio_complete(void *ignore, 915 struct request_queue *q, struct bio *bio) 916 { 917 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, 918 blk_status_to_errno(bio->bi_status)); 919 } 920 921 static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio) 922 { 923 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE, 924 0); 925 } 926 927 static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio) 928 { 929 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE, 930 0); 931 } 932 933 static void blk_add_trace_bio_queue(void *ignore, struct bio *bio) 934 { 935 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0); 936 } 937 938 static void blk_add_trace_getrq(void *ignore, struct bio *bio) 939 { 940 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0); 941 } 942 943 static void blk_add_trace_plug(void *ignore, struct request_queue *q) 944 { 945 struct blk_trace *bt; 946 947 rcu_read_lock(); 948 bt = rcu_dereference(q->blk_trace); 949 if (bt) 950 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0); 951 rcu_read_unlock(); 952 } 953 954 static void blk_add_trace_unplug(void *ignore, struct request_queue *q, 955 unsigned int depth, bool explicit) 956 { 957 struct blk_trace *bt; 958 959 rcu_read_lock(); 960 bt = rcu_dereference(q->blk_trace); 961 if (bt) { 962 __be64 rpdu = cpu_to_be64(depth); 963 u32 what; 964 965 if (explicit) 966 what = BLK_TA_UNPLUG_IO; 967 else 968 what = BLK_TA_UNPLUG_TIMER; 969 970 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0); 971 } 972 rcu_read_unlock(); 973 } 974 975 static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu) 976 { 977 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 978 struct blk_trace *bt; 979 980 rcu_read_lock(); 981 bt = rcu_dereference(q->blk_trace); 982 if (bt) { 983 __be64 rpdu = cpu_to_be64(pdu); 984 985 __blk_add_trace(bt, bio->bi_iter.bi_sector, 986 bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT, 987 blk_status_to_errno(bio->bi_status), 988 sizeof(rpdu), &rpdu, 989 blk_trace_bio_get_cgid(q, bio)); 990 } 991 rcu_read_unlock(); 992 } 993 994 /** 995 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation 996 * @ignore: trace callback data parameter (not used) 997 * @bio: the source bio 998 * @dev: source device 999 * @from: source sector 1000 * 1001 * Called after a bio is remapped to a different device and/or sector. 1002 **/ 1003 static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev, 1004 sector_t from) 1005 { 1006 struct request_queue *q = bio->bi_bdev->bd_disk->queue; 1007 struct blk_trace *bt; 1008 struct blk_io_trace_remap r; 1009 1010 rcu_read_lock(); 1011 bt = rcu_dereference(q->blk_trace); 1012 if (likely(!bt)) { 1013 rcu_read_unlock(); 1014 return; 1015 } 1016 1017 r.device_from = cpu_to_be32(dev); 1018 r.device_to = cpu_to_be32(bio_dev(bio)); 1019 r.sector_from = cpu_to_be64(from); 1020 1021 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 1022 bio->bi_opf, BLK_TA_REMAP, 1023 blk_status_to_errno(bio->bi_status), 1024 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); 1025 rcu_read_unlock(); 1026 } 1027 1028 /** 1029 * blk_add_trace_rq_remap - Add a trace for a request-remap operation 1030 * @ignore: trace callback data parameter (not used) 1031 * @rq: the source request 1032 * @dev: target device 1033 * @from: source sector 1034 * 1035 * Description: 1036 * Device mapper remaps request to other devices. 1037 * Add a trace for that action. 1038 * 1039 **/ 1040 static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, 1041 sector_t from) 1042 { 1043 struct blk_trace *bt; 1044 struct blk_io_trace_remap r; 1045 1046 rcu_read_lock(); 1047 bt = rcu_dereference(rq->q->blk_trace); 1048 if (likely(!bt)) { 1049 rcu_read_unlock(); 1050 return; 1051 } 1052 1053 r.device_from = cpu_to_be32(dev); 1054 r.device_to = cpu_to_be32(disk_devt(rq->q->disk)); 1055 r.sector_from = cpu_to_be64(from); 1056 1057 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 1058 rq->cmd_flags, BLK_TA_REMAP, 0, 1059 sizeof(r), &r, blk_trace_request_get_cgid(rq)); 1060 rcu_read_unlock(); 1061 } 1062 1063 /** 1064 * blk_add_driver_data - Add binary message with driver-specific data 1065 * @rq: io request 1066 * @data: driver-specific data 1067 * @len: length of driver-specific data 1068 * 1069 * Description: 1070 * Some drivers might want to write driver-specific data per request. 1071 * 1072 **/ 1073 void blk_add_driver_data(struct request *rq, void *data, size_t len) 1074 { 1075 struct blk_trace *bt; 1076 1077 rcu_read_lock(); 1078 bt = rcu_dereference(rq->q->blk_trace); 1079 if (likely(!bt)) { 1080 rcu_read_unlock(); 1081 return; 1082 } 1083 1084 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 1085 BLK_TA_DRV_DATA, 0, len, data, 1086 blk_trace_request_get_cgid(rq)); 1087 rcu_read_unlock(); 1088 } 1089 EXPORT_SYMBOL_GPL(blk_add_driver_data); 1090 1091 static void blk_register_tracepoints(void) 1092 { 1093 int ret; 1094 1095 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1096 WARN_ON(ret); 1097 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1098 WARN_ON(ret); 1099 ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); 1100 WARN_ON(ret); 1101 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1102 WARN_ON(ret); 1103 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1104 WARN_ON(ret); 1105 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1106 WARN_ON(ret); 1107 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1108 WARN_ON(ret); 1109 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1110 WARN_ON(ret); 1111 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1112 WARN_ON(ret); 1113 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1114 WARN_ON(ret); 1115 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL); 1116 WARN_ON(ret); 1117 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1118 WARN_ON(ret); 1119 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL); 1120 WARN_ON(ret); 1121 ret = register_trace_block_split(blk_add_trace_split, NULL); 1122 WARN_ON(ret); 1123 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1124 WARN_ON(ret); 1125 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1126 WARN_ON(ret); 1127 } 1128 1129 static void blk_unregister_tracepoints(void) 1130 { 1131 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1132 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1133 unregister_trace_block_split(blk_add_trace_split, NULL); 1134 unregister_trace_block_unplug(blk_add_trace_unplug, NULL); 1135 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1136 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1137 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL); 1138 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); 1139 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); 1140 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); 1141 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); 1142 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); 1143 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); 1144 unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); 1145 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); 1146 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); 1147 1148 tracepoint_synchronize_unregister(); 1149 } 1150 1151 /* 1152 * struct blk_io_tracer formatting routines 1153 */ 1154 1155 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) 1156 { 1157 int i = 0; 1158 int tc = t->action >> BLK_TC_SHIFT; 1159 1160 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1161 rwbs[i++] = 'N'; 1162 goto out; 1163 } 1164 1165 if (tc & BLK_TC_FLUSH) 1166 rwbs[i++] = 'F'; 1167 1168 if (tc & BLK_TC_DISCARD) 1169 rwbs[i++] = 'D'; 1170 else if (tc & BLK_TC_WRITE) 1171 rwbs[i++] = 'W'; 1172 else if (t->bytes) 1173 rwbs[i++] = 'R'; 1174 else 1175 rwbs[i++] = 'N'; 1176 1177 if (tc & BLK_TC_FUA) 1178 rwbs[i++] = 'F'; 1179 if (tc & BLK_TC_AHEAD) 1180 rwbs[i++] = 'A'; 1181 if (tc & BLK_TC_SYNC) 1182 rwbs[i++] = 'S'; 1183 if (tc & BLK_TC_META) 1184 rwbs[i++] = 'M'; 1185 out: 1186 rwbs[i] = '\0'; 1187 } 1188 1189 static inline 1190 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) 1191 { 1192 return (const struct blk_io_trace *)ent; 1193 } 1194 1195 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) 1196 { 1197 return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0); 1198 } 1199 1200 static inline u64 t_cgid(const struct trace_entry *ent) 1201 { 1202 return *(u64 *)(te_blk_io_trace(ent) + 1); 1203 } 1204 1205 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) 1206 { 1207 return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0); 1208 } 1209 1210 static inline u32 t_action(const struct trace_entry *ent) 1211 { 1212 return te_blk_io_trace(ent)->action; 1213 } 1214 1215 static inline u32 t_bytes(const struct trace_entry *ent) 1216 { 1217 return te_blk_io_trace(ent)->bytes; 1218 } 1219 1220 static inline u32 t_sec(const struct trace_entry *ent) 1221 { 1222 return te_blk_io_trace(ent)->bytes >> 9; 1223 } 1224 1225 static inline unsigned long long t_sector(const struct trace_entry *ent) 1226 { 1227 return te_blk_io_trace(ent)->sector; 1228 } 1229 1230 static inline __u16 t_error(const struct trace_entry *ent) 1231 { 1232 return te_blk_io_trace(ent)->error; 1233 } 1234 1235 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) 1236 { 1237 const __be64 *val = pdu_start(ent, has_cg); 1238 return be64_to_cpu(*val); 1239 } 1240 1241 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, 1242 bool has_cg); 1243 1244 static void blk_log_action_classic(struct trace_iterator *iter, const char *act, 1245 bool has_cg) 1246 { 1247 char rwbs[RWBS_LEN]; 1248 unsigned long long ts = iter->ts; 1249 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); 1250 unsigned secs = (unsigned long)ts; 1251 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1252 1253 fill_rwbs(rwbs, t); 1254 1255 trace_seq_printf(&iter->seq, 1256 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", 1257 MAJOR(t->device), MINOR(t->device), iter->cpu, 1258 secs, nsec_rem, iter->ent->pid, act, rwbs); 1259 } 1260 1261 static void blk_log_action(struct trace_iterator *iter, const char *act, 1262 bool has_cg) 1263 { 1264 char rwbs[RWBS_LEN]; 1265 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1266 1267 fill_rwbs(rwbs, t); 1268 if (has_cg) { 1269 u64 id = t_cgid(iter->ent); 1270 1271 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) { 1272 char blkcg_name_buf[NAME_MAX + 1] = "<...>"; 1273 1274 cgroup_path_from_kernfs_id(id, blkcg_name_buf, 1275 sizeof(blkcg_name_buf)); 1276 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ", 1277 MAJOR(t->device), MINOR(t->device), 1278 blkcg_name_buf, act, rwbs); 1279 } else { 1280 /* 1281 * The cgid portion used to be "INO,GEN". Userland 1282 * builds a FILEID_INO32_GEN fid out of them and 1283 * opens the cgroup using open_by_handle_at(2). 1284 * While 32bit ino setups are still the same, 64bit 1285 * ones now use the 64bit ino as the whole ID and 1286 * no longer use generation. 1287 * 1288 * Regardless of the content, always output 1289 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can 1290 * be mapped back to @id on both 64 and 32bit ino 1291 * setups. See __kernfs_fh_to_dentry(). 1292 */ 1293 trace_seq_printf(&iter->seq, 1294 "%3d,%-3d %llx,%-llx %2s %3s ", 1295 MAJOR(t->device), MINOR(t->device), 1296 id & U32_MAX, id >> 32, act, rwbs); 1297 } 1298 } else 1299 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", 1300 MAJOR(t->device), MINOR(t->device), act, rwbs); 1301 } 1302 1303 static void blk_log_dump_pdu(struct trace_seq *s, 1304 const struct trace_entry *ent, bool has_cg) 1305 { 1306 const unsigned char *pdu_buf; 1307 int pdu_len; 1308 int i, end; 1309 1310 pdu_buf = pdu_start(ent, has_cg); 1311 pdu_len = pdu_real_len(ent, has_cg); 1312 1313 if (!pdu_len) 1314 return; 1315 1316 /* find the last zero that needs to be printed */ 1317 for (end = pdu_len - 1; end >= 0; end--) 1318 if (pdu_buf[end]) 1319 break; 1320 end++; 1321 1322 trace_seq_putc(s, '('); 1323 1324 for (i = 0; i < pdu_len; i++) { 1325 1326 trace_seq_printf(s, "%s%02x", 1327 i == 0 ? "" : " ", pdu_buf[i]); 1328 1329 /* 1330 * stop when the rest is just zeros and indicate so 1331 * with a ".." appended 1332 */ 1333 if (i == end && end != pdu_len - 1) { 1334 trace_seq_puts(s, " ..) "); 1335 return; 1336 } 1337 } 1338 1339 trace_seq_puts(s, ") "); 1340 } 1341 1342 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1343 { 1344 char cmd[TASK_COMM_LEN]; 1345 1346 trace_find_cmdline(ent->pid, cmd); 1347 1348 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1349 trace_seq_printf(s, "%u ", t_bytes(ent)); 1350 blk_log_dump_pdu(s, ent, has_cg); 1351 trace_seq_printf(s, "[%s]\n", cmd); 1352 } else { 1353 if (t_sec(ent)) 1354 trace_seq_printf(s, "%llu + %u [%s]\n", 1355 t_sector(ent), t_sec(ent), cmd); 1356 else 1357 trace_seq_printf(s, "[%s]\n", cmd); 1358 } 1359 } 1360 1361 static void blk_log_with_error(struct trace_seq *s, 1362 const struct trace_entry *ent, bool has_cg) 1363 { 1364 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1365 blk_log_dump_pdu(s, ent, has_cg); 1366 trace_seq_printf(s, "[%d]\n", t_error(ent)); 1367 } else { 1368 if (t_sec(ent)) 1369 trace_seq_printf(s, "%llu + %u [%d]\n", 1370 t_sector(ent), 1371 t_sec(ent), t_error(ent)); 1372 else 1373 trace_seq_printf(s, "%llu [%d]\n", 1374 t_sector(ent), t_error(ent)); 1375 } 1376 } 1377 1378 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1379 { 1380 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); 1381 1382 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", 1383 t_sector(ent), t_sec(ent), 1384 MAJOR(be32_to_cpu(__r->device_from)), 1385 MINOR(be32_to_cpu(__r->device_from)), 1386 be64_to_cpu(__r->sector_from)); 1387 } 1388 1389 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1390 { 1391 char cmd[TASK_COMM_LEN]; 1392 1393 trace_find_cmdline(ent->pid, cmd); 1394 1395 trace_seq_printf(s, "[%s]\n", cmd); 1396 } 1397 1398 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1399 { 1400 char cmd[TASK_COMM_LEN]; 1401 1402 trace_find_cmdline(ent->pid, cmd); 1403 1404 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); 1405 } 1406 1407 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1408 { 1409 char cmd[TASK_COMM_LEN]; 1410 1411 trace_find_cmdline(ent->pid, cmd); 1412 1413 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), 1414 get_pdu_int(ent, has_cg), cmd); 1415 } 1416 1417 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, 1418 bool has_cg) 1419 { 1420 1421 trace_seq_putmem(s, pdu_start(ent, has_cg), 1422 pdu_real_len(ent, has_cg)); 1423 trace_seq_putc(s, '\n'); 1424 } 1425 1426 /* 1427 * struct tracer operations 1428 */ 1429 1430 static void blk_tracer_print_header(struct seq_file *m) 1431 { 1432 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1433 return; 1434 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" 1435 "# | | | | | |\n"); 1436 } 1437 1438 static void blk_tracer_start(struct trace_array *tr) 1439 { 1440 blk_tracer_enabled = true; 1441 } 1442 1443 static int blk_tracer_init(struct trace_array *tr) 1444 { 1445 blk_tr = tr; 1446 blk_tracer_start(tr); 1447 return 0; 1448 } 1449 1450 static void blk_tracer_stop(struct trace_array *tr) 1451 { 1452 blk_tracer_enabled = false; 1453 } 1454 1455 static void blk_tracer_reset(struct trace_array *tr) 1456 { 1457 blk_tracer_stop(tr); 1458 } 1459 1460 static const struct { 1461 const char *act[2]; 1462 void (*print)(struct trace_seq *s, const struct trace_entry *ent, 1463 bool has_cg); 1464 } what2act[] = { 1465 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, 1466 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, 1467 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, 1468 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, 1469 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, 1470 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, 1471 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, 1472 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, 1473 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, 1474 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, 1475 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, 1476 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, 1477 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, 1478 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, 1479 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, 1480 }; 1481 1482 static enum print_line_t print_one_line(struct trace_iterator *iter, 1483 bool classic) 1484 { 1485 struct trace_array *tr = iter->tr; 1486 struct trace_seq *s = &iter->seq; 1487 const struct blk_io_trace *t; 1488 u16 what; 1489 bool long_act; 1490 blk_log_action_t *log_action; 1491 bool has_cg; 1492 1493 t = te_blk_io_trace(iter->ent); 1494 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP; 1495 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); 1496 log_action = classic ? &blk_log_action_classic : &blk_log_action; 1497 has_cg = t->action & __BLK_TA_CGROUP; 1498 1499 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1500 log_action(iter, long_act ? "message" : "m", has_cg); 1501 blk_log_msg(s, iter->ent, has_cg); 1502 return trace_handle_return(s); 1503 } 1504 1505 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) 1506 trace_seq_printf(s, "Unknown action %x\n", what); 1507 else { 1508 log_action(iter, what2act[what].act[long_act], has_cg); 1509 what2act[what].print(s, iter->ent, has_cg); 1510 } 1511 1512 return trace_handle_return(s); 1513 } 1514 1515 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, 1516 int flags, struct trace_event *event) 1517 { 1518 return print_one_line(iter, false); 1519 } 1520 1521 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter) 1522 { 1523 struct trace_seq *s = &iter->seq; 1524 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; 1525 const int offset = offsetof(struct blk_io_trace, sector); 1526 struct blk_io_trace old = { 1527 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, 1528 .time = iter->ts, 1529 }; 1530 1531 trace_seq_putmem(s, &old, offset); 1532 trace_seq_putmem(s, &t->sector, 1533 sizeof(old) - offset + t->pdu_len); 1534 } 1535 1536 static enum print_line_t 1537 blk_trace_event_print_binary(struct trace_iterator *iter, int flags, 1538 struct trace_event *event) 1539 { 1540 blk_trace_synthesize_old_trace(iter); 1541 1542 return trace_handle_return(&iter->seq); 1543 } 1544 1545 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) 1546 { 1547 if ((iter->ent->type != TRACE_BLK) || 1548 !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) 1549 return TRACE_TYPE_UNHANDLED; 1550 1551 return print_one_line(iter, true); 1552 } 1553 1554 static int 1555 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 1556 { 1557 /* don't output context-info for blk_classic output */ 1558 if (bit == TRACE_BLK_OPT_CLASSIC) { 1559 if (set) 1560 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1561 else 1562 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; 1563 } 1564 return 0; 1565 } 1566 1567 static struct tracer blk_tracer __read_mostly = { 1568 .name = "blk", 1569 .init = blk_tracer_init, 1570 .reset = blk_tracer_reset, 1571 .start = blk_tracer_start, 1572 .stop = blk_tracer_stop, 1573 .print_header = blk_tracer_print_header, 1574 .print_line = blk_tracer_print_line, 1575 .flags = &blk_tracer_flags, 1576 .set_flag = blk_tracer_set_flag, 1577 }; 1578 1579 static struct trace_event_functions trace_blk_event_funcs = { 1580 .trace = blk_trace_event_print, 1581 .binary = blk_trace_event_print_binary, 1582 }; 1583 1584 static struct trace_event trace_blk_event = { 1585 .type = TRACE_BLK, 1586 .funcs = &trace_blk_event_funcs, 1587 }; 1588 1589 static int __init init_blk_tracer(void) 1590 { 1591 if (!register_trace_event(&trace_blk_event)) { 1592 pr_warn("Warning: could not register block events\n"); 1593 return 1; 1594 } 1595 1596 if (register_tracer(&blk_tracer) != 0) { 1597 pr_warn("Warning: could not register the block tracer\n"); 1598 unregister_trace_event(&trace_blk_event); 1599 return 1; 1600 } 1601 1602 return 0; 1603 } 1604 1605 device_initcall(init_blk_tracer); 1606 1607 static int blk_trace_remove_queue(struct request_queue *q) 1608 { 1609 struct blk_trace *bt; 1610 1611 bt = rcu_replace_pointer(q->blk_trace, NULL, 1612 lockdep_is_held(&q->debugfs_mutex)); 1613 if (bt == NULL) 1614 return -EINVAL; 1615 1616 blk_trace_stop(bt); 1617 1618 put_probe_ref(); 1619 synchronize_rcu(); 1620 blk_trace_free(q, bt); 1621 return 0; 1622 } 1623 1624 /* 1625 * Setup everything required to start tracing 1626 */ 1627 static int blk_trace_setup_queue(struct request_queue *q, 1628 struct block_device *bdev) 1629 { 1630 struct blk_trace *bt = NULL; 1631 int ret = -ENOMEM; 1632 1633 bt = kzalloc(sizeof(*bt), GFP_KERNEL); 1634 if (!bt) 1635 return -ENOMEM; 1636 1637 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); 1638 if (!bt->msg_data) 1639 goto free_bt; 1640 1641 bt->dev = bdev->bd_dev; 1642 bt->act_mask = (u16)-1; 1643 1644 blk_trace_setup_lba(bt, bdev); 1645 1646 rcu_assign_pointer(q->blk_trace, bt); 1647 get_probe_ref(); 1648 return 0; 1649 1650 free_bt: 1651 blk_trace_free(q, bt); 1652 return ret; 1653 } 1654 1655 /* 1656 * sysfs interface to enable and configure tracing 1657 */ 1658 1659 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1660 struct device_attribute *attr, 1661 char *buf); 1662 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1663 struct device_attribute *attr, 1664 const char *buf, size_t count); 1665 #define BLK_TRACE_DEVICE_ATTR(_name) \ 1666 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ 1667 sysfs_blk_trace_attr_show, \ 1668 sysfs_blk_trace_attr_store) 1669 1670 static BLK_TRACE_DEVICE_ATTR(enable); 1671 static BLK_TRACE_DEVICE_ATTR(act_mask); 1672 static BLK_TRACE_DEVICE_ATTR(pid); 1673 static BLK_TRACE_DEVICE_ATTR(start_lba); 1674 static BLK_TRACE_DEVICE_ATTR(end_lba); 1675 1676 static struct attribute *blk_trace_attrs[] = { 1677 &dev_attr_enable.attr, 1678 &dev_attr_act_mask.attr, 1679 &dev_attr_pid.attr, 1680 &dev_attr_start_lba.attr, 1681 &dev_attr_end_lba.attr, 1682 NULL 1683 }; 1684 1685 struct attribute_group blk_trace_attr_group = { 1686 .name = "trace", 1687 .attrs = blk_trace_attrs, 1688 }; 1689 1690 static const struct { 1691 int mask; 1692 const char *str; 1693 } mask_maps[] = { 1694 { BLK_TC_READ, "read" }, 1695 { BLK_TC_WRITE, "write" }, 1696 { BLK_TC_FLUSH, "flush" }, 1697 { BLK_TC_SYNC, "sync" }, 1698 { BLK_TC_QUEUE, "queue" }, 1699 { BLK_TC_REQUEUE, "requeue" }, 1700 { BLK_TC_ISSUE, "issue" }, 1701 { BLK_TC_COMPLETE, "complete" }, 1702 { BLK_TC_FS, "fs" }, 1703 { BLK_TC_PC, "pc" }, 1704 { BLK_TC_NOTIFY, "notify" }, 1705 { BLK_TC_AHEAD, "ahead" }, 1706 { BLK_TC_META, "meta" }, 1707 { BLK_TC_DISCARD, "discard" }, 1708 { BLK_TC_DRV_DATA, "drv_data" }, 1709 { BLK_TC_FUA, "fua" }, 1710 }; 1711 1712 static int blk_trace_str2mask(const char *str) 1713 { 1714 int i; 1715 int mask = 0; 1716 char *buf, *s, *token; 1717 1718 buf = kstrdup(str, GFP_KERNEL); 1719 if (buf == NULL) 1720 return -ENOMEM; 1721 s = strstrip(buf); 1722 1723 while (1) { 1724 token = strsep(&s, ","); 1725 if (token == NULL) 1726 break; 1727 1728 if (*token == '\0') 1729 continue; 1730 1731 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1732 if (strcasecmp(token, mask_maps[i].str) == 0) { 1733 mask |= mask_maps[i].mask; 1734 break; 1735 } 1736 } 1737 if (i == ARRAY_SIZE(mask_maps)) { 1738 mask = -EINVAL; 1739 break; 1740 } 1741 } 1742 kfree(buf); 1743 1744 return mask; 1745 } 1746 1747 static ssize_t blk_trace_mask2str(char *buf, int mask) 1748 { 1749 int i; 1750 char *p = buf; 1751 1752 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) { 1753 if (mask & mask_maps[i].mask) { 1754 p += sprintf(p, "%s%s", 1755 (p == buf) ? "" : ",", mask_maps[i].str); 1756 } 1757 } 1758 *p++ = '\n'; 1759 1760 return p - buf; 1761 } 1762 1763 static ssize_t sysfs_blk_trace_attr_show(struct device *dev, 1764 struct device_attribute *attr, 1765 char *buf) 1766 { 1767 struct block_device *bdev = dev_to_bdev(dev); 1768 struct request_queue *q = bdev_get_queue(bdev); 1769 struct blk_trace *bt; 1770 ssize_t ret = -ENXIO; 1771 1772 mutex_lock(&q->debugfs_mutex); 1773 1774 bt = rcu_dereference_protected(q->blk_trace, 1775 lockdep_is_held(&q->debugfs_mutex)); 1776 if (attr == &dev_attr_enable) { 1777 ret = sprintf(buf, "%u\n", !!bt); 1778 goto out_unlock_bdev; 1779 } 1780 1781 if (bt == NULL) 1782 ret = sprintf(buf, "disabled\n"); 1783 else if (attr == &dev_attr_act_mask) 1784 ret = blk_trace_mask2str(buf, bt->act_mask); 1785 else if (attr == &dev_attr_pid) 1786 ret = sprintf(buf, "%u\n", bt->pid); 1787 else if (attr == &dev_attr_start_lba) 1788 ret = sprintf(buf, "%llu\n", bt->start_lba); 1789 else if (attr == &dev_attr_end_lba) 1790 ret = sprintf(buf, "%llu\n", bt->end_lba); 1791 1792 out_unlock_bdev: 1793 mutex_unlock(&q->debugfs_mutex); 1794 return ret; 1795 } 1796 1797 static ssize_t sysfs_blk_trace_attr_store(struct device *dev, 1798 struct device_attribute *attr, 1799 const char *buf, size_t count) 1800 { 1801 struct block_device *bdev = dev_to_bdev(dev); 1802 struct request_queue *q = bdev_get_queue(bdev); 1803 struct blk_trace *bt; 1804 u64 value; 1805 ssize_t ret = -EINVAL; 1806 1807 if (count == 0) 1808 goto out; 1809 1810 if (attr == &dev_attr_act_mask) { 1811 if (kstrtoull(buf, 0, &value)) { 1812 /* Assume it is a list of trace category names */ 1813 ret = blk_trace_str2mask(buf); 1814 if (ret < 0) 1815 goto out; 1816 value = ret; 1817 } 1818 } else { 1819 if (kstrtoull(buf, 0, &value)) 1820 goto out; 1821 } 1822 1823 mutex_lock(&q->debugfs_mutex); 1824 1825 bt = rcu_dereference_protected(q->blk_trace, 1826 lockdep_is_held(&q->debugfs_mutex)); 1827 if (attr == &dev_attr_enable) { 1828 if (!!value == !!bt) { 1829 ret = 0; 1830 goto out_unlock_bdev; 1831 } 1832 if (value) 1833 ret = blk_trace_setup_queue(q, bdev); 1834 else 1835 ret = blk_trace_remove_queue(q); 1836 goto out_unlock_bdev; 1837 } 1838 1839 ret = 0; 1840 if (bt == NULL) { 1841 ret = blk_trace_setup_queue(q, bdev); 1842 bt = rcu_dereference_protected(q->blk_trace, 1843 lockdep_is_held(&q->debugfs_mutex)); 1844 } 1845 1846 if (ret == 0) { 1847 if (attr == &dev_attr_act_mask) 1848 bt->act_mask = value; 1849 else if (attr == &dev_attr_pid) 1850 bt->pid = value; 1851 else if (attr == &dev_attr_start_lba) 1852 bt->start_lba = value; 1853 else if (attr == &dev_attr_end_lba) 1854 bt->end_lba = value; 1855 } 1856 1857 out_unlock_bdev: 1858 mutex_unlock(&q->debugfs_mutex); 1859 out: 1860 return ret ? ret : count; 1861 } 1862 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 1863 1864 #ifdef CONFIG_EVENT_TRACING 1865 1866 /** 1867 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string. 1868 * @rwbs: buffer to be filled 1869 * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint 1870 * 1871 * Description: 1872 * Maps each request operation and flag to a single character and fills the 1873 * buffer provided by the caller with resulting string. 1874 * 1875 **/ 1876 void blk_fill_rwbs(char *rwbs, blk_opf_t opf) 1877 { 1878 int i = 0; 1879 1880 if (opf & REQ_PREFLUSH) 1881 rwbs[i++] = 'F'; 1882 1883 switch (opf & REQ_OP_MASK) { 1884 case REQ_OP_WRITE: 1885 rwbs[i++] = 'W'; 1886 break; 1887 case REQ_OP_DISCARD: 1888 rwbs[i++] = 'D'; 1889 break; 1890 case REQ_OP_SECURE_ERASE: 1891 rwbs[i++] = 'D'; 1892 rwbs[i++] = 'E'; 1893 break; 1894 case REQ_OP_FLUSH: 1895 rwbs[i++] = 'F'; 1896 break; 1897 case REQ_OP_READ: 1898 rwbs[i++] = 'R'; 1899 break; 1900 default: 1901 rwbs[i++] = 'N'; 1902 } 1903 1904 if (opf & REQ_FUA) 1905 rwbs[i++] = 'F'; 1906 if (opf & REQ_RAHEAD) 1907 rwbs[i++] = 'A'; 1908 if (opf & REQ_SYNC) 1909 rwbs[i++] = 'S'; 1910 if (opf & REQ_META) 1911 rwbs[i++] = 'M'; 1912 1913 rwbs[i] = '\0'; 1914 } 1915 EXPORT_SYMBOL_GPL(blk_fill_rwbs); 1916 1917 #endif /* CONFIG_EVENT_TRACING */ 1918 1919