// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_atomic_write_max_bytes_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_atomic_write_max_bytes(q), page);
}

static ssize_t queue_atomic_write_boundary_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_atomic_write_boundary_bytes(q), page);
}

static ssize_t queue_atomic_write_unit_min_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_atomic_write_unit_min_bytes(q), page);
}

static ssize_t queue_atomic_write_unit_max_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_atomic_write_unit_max_bytes(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}
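/*
 * The read-only attributes that follow (segment and block sizes,
 * chunk_sectors, io_min/io_opt, discard granularity) simply mirror fields
 * of this queue's struct queue_limits.
 */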
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard_bytes;
        struct queue_limits lim;
        ssize_t ret;
        int err;

        ret = queue_var_store(&max_discard_bytes, page, count);
        if (ret < 0)
                return ret;

        if (max_discard_bytes & (q->limits.discard_granularity - 1))
                return -EINVAL;

        if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
                return -EINVAL;

        lim = queue_limits_start_update(q);
        lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
        err = queue_limits_commit_update(q, &lim);
        if (err)
                return err;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = queue_max_zone_append_sectors(q);

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb;
        struct queue_limits lim;
        ssize_t ret;
        int err;

        ret = queue_var_store(&max_sectors_kb, page, count);
        if (ret < 0)
                return ret;

        lim = queue_limits_start_update(q);
        lim.max_user_sectors = max_sectors_kb << 1;
        err = queue_limits_commit_update(q, &lim);
        if (err)
                return err;
        return ret;
}
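/*
 * max_sectors_kb above adjusts the soft user cap (max_user_sectors);
 * max_hw_sectors_kb below reports the hard driver limit and is read-only.
 */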
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_dma_alignment(q), page);
}

static ssize_t queue_feature_store(struct request_queue *q, const char *page,
                size_t count, blk_features_t feature)
{
        struct queue_limits lim;
        unsigned long val;
        ssize_t ret;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        lim = queue_limits_start_update(q);
        if (val)
                lim.features |= feature;
        else
                lim.features &= ~feature;
        ret = queue_limits_commit_update(q, &lim);
        if (ret)
                return ret;
        return count;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)                             \
static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \
{                                                                        \
        return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \
}                                                                        \
static ssize_t queue_##_name##_store(struct request_queue *q,            \
                const char *page, size_t count)                          \
{                                                                        \
        return queue_feature_store(q, page, count, _feature);            \
}

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        if (blk_queue_is_zoned(q))
                return sprintf(page, "host-managed\n");
        return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}
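/*
 * rq_affinity accepts 0 (no completion steering), 1 (complete in the
 * submitting CPU's group, QUEUE_FLAG_SAME_COMP) or 2 (force completion on
 * the submitting CPU, QUEUE_FLAG_SAME_FORCE as well), e.g.:
 *
 *      echo 2 > /sys/block/<disk>/queue/rq_affinity
 */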
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        if (!(q->limits.features & BLK_FEAT_POLL))
                return -EINVAL;
        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
        pr_info_ratelimited("please use driver specific parameters instead.\n");
        return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (blk_queue_write_cache(q))
                return sprintf(page, "write back\n");
        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        struct queue_limits lim;
        bool disable;
        int err;

        if (!strncmp(page, "write back", 10)) {
                disable = false;
        } else if (!strncmp(page, "write through", 13) ||
                   !strncmp(page, "none", 4)) {
                disable = true;
        } else {
                return -EINVAL;
        }

        lim = queue_limits_start_update(q);
        if (disable)
                lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
        else
                lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
        err = queue_limits_commit_update(q, &lim);
        if (err)
                return err;
        return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", !!(q->limits.features & BLK_FEAT_FUA));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(!!blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr = { .name = _name, .mode = 0444 },        \
        .show = _prefix##_show,                         \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr = { .name = _name, .mode = 0644 },        \
        .show = _prefix##_show,                         \
        .store = _prefix##_store,                       \
};
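/*
 * Each entry below creates one file under /sys/block/<disk>/queue/, wiring
 * the matching _show/_store helpers to the given file name.
 */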
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_bytes, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary, "atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        if (wbt_disabled(q))
                return sprintf(page, "0\n");

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
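/*
 * wbt_lat_usec takes a latency target in microseconds: 0 disables writeback
 * throttling and -1 restores the device-type default. The value is kept in
 * nanoseconds internally, hence the scaling in the store path below.
 */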
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q->disk);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);

        return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_atomic_write_max_bytes_entry.attr,
        &queue_atomic_write_boundary_entry.attr,
        &queue_atomic_write_unit_min_entry.attr,
        &queue_atomic_write_unit_max_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_rotational_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_add_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_virt_boundary_mask_entry.attr,
        &queue_dma_alignment_entry.attr,
        NULL,
};
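/*
 * Per-file visibility is decided by the ->is_visible callbacks below: the
 * zone limits are hidden on non-zoned devices, and the request-based group
 * is hidden entirely for bio-based queues.
 */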
/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
        &queue_requests_entry.attr,
        &elv_iosched_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
        &queue_wb_lat_entry.attr,
#endif
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
                                         struct attribute *attr, int n)
{
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;

        if (!queue_is_mq(q))
                return 0;

        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
        .attrs = blk_mq_queue_attrs,
        .is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
        struct request_queue *q = disk->queue;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        blk_mq_freeze_queue(q);
        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        blk_mq_unfreeze_queue(q);
        return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
        &queue_attr_group,
        &blk_mq_queue_attr_group,
        NULL
};

static void blk_queue_release(struct kobject *kobj)
{
        /* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
        .default_groups = blk_queue_attr_groups,
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        mutex_lock(&q->debugfs_mutex);
        blk_trace_shutdown(q);
        debugfs_remove_recursive(q->debugfs_dir);
        q->debugfs_dir = NULL;
        q->sched_debugfs_dir = NULL;
        q->rqos_debugfs_dir = NULL;
        mutex_unlock(&q->debugfs_mutex);
}
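/*
 * The registration path below creates the "queue" directory and its
 * attribute groups via kobject_add() and only sends the KOBJ_ADD uevent
 * once all child objects have been set up.
 */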
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        int ret;

        mutex_lock(&q->sysfs_dir_lock);
        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
        if (ret < 0)
                goto out_put_queue_kobj;

        if (queue_is_mq(q)) {
                ret = blk_mq_sysfs_register(disk);
                if (ret)
                        goto out_put_queue_kobj;
        }
        mutex_lock(&q->sysfs_lock);

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
        if (queue_is_mq(q))
                blk_mq_debugfs_register(q);
        mutex_unlock(&q->debugfs_mutex);

        ret = disk_register_independent_access_ranges(disk);
        if (ret)
                goto out_debugfs_remove;

        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret)
                        goto out_unregister_ia_ranges;
        }

        ret = blk_crypto_sysfs_register(disk);
        if (ret)
                goto out_elv_unregister;

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(disk);

        /* Now everything is ready; send out the KOBJ_ADD uevent. */
        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices. Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved. To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;

out_elv_unregister:
        elv_unregister_queue(q);
out_unregister_ia_ranges:
        disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
        blk_debugfs_remove(disk);
        mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
        kobject_put(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);
        return ret;
}
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_sysfs_unregister(disk);
        blk_crypto_sysfs_unregister(disk);

        mutex_lock(&q->sysfs_lock);
        elv_unregister_queue(q);
        disk_unregister_independent_access_ranges(disk);
        mutex_unlock(&q->sysfs_lock);

        /* Now that we've deleted all child objects, we can delete the queue. */
        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
        kobject_del(&disk->queue_kobj);
        mutex_unlock(&q->sysfs_dir_lock);

        blk_debugfs_remove(disk);
}