// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
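/*
 * For illustration: read_ahead_kb is kept internally in pages, so with
 * 4 KiB pages PAGE_SHIFT - 10 == 2 and the usual default of 32 ra_pages
 * reads back as 128 KB. From user space (assuming a disk named "sda"):
 *
 *	$ cat /sys/block/sda/queue/read_ahead_kb
 *	128
 *	# echo 1024 > /sys/block/sda/queue/read_ahead_kb
 */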
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
		page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS_CAP >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
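/*
 * Unit note: block layer sectors are always 512 bytes, so the ">> 1"
 * conversions above turn sectors into kilobytes (e.g. 2560 sectors ==
 * 1280 KB). Writing 0 to max_sectors_kb clears the user-imposed limit
 * and falls back to the hardware limit, capped at BLK_DEF_MAX_SECTORS_CAP.
 */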
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
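/*
 * For reference, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) above generates
 * queue_nonrot_show()/queue_nonrot_store(), which report and toggle
 * QUEUE_FLAG_NONROT with the value inverted: the "rotational" attribute
 * is the logical negation of the non-rotational queue flag.
 */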
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	if (blk_queue_is_zoned(q))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	if (!strncmp(page, "write back", 10)) {
		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
			return -EINVAL;
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		return -EINVAL;
	}

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
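/*
 * As a sketch of what the macros above produce,
 * QUEUE_RW_ENTRY(queue_requests, "nr_requests") below expands to:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 */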
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
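/*
 * Interface note: wbt_lat_usec takes the target latency in microseconds.
 * Per the store handler above, writing -1 restores the device-type
 * default from wbt_default_latency_nsec(), and writing 0 disables
 * write-back throttling (wbt_disabled() then makes the file read "0").
 */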
/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};
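/*
 * Visibility note: returning 0 from an ->is_visible() callback makes
 * sysfs omit that attribute's file entirely. This is how the zone
 * limits above are hidden on non-zoned queues and how the mq-only
 * group disappears for bio-based queues.
 */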
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
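/*
 * After a successful blk_register_queue() the attributes defined above
 * are visible under /sys/block/<disk>/queue/, and the KOBJ_ADD uevent
 * gives user space (e.g. udev rules applying I/O tuning) a hook to act
 * on the newly registered queue.
 */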
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
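/*
 * Usage note: drivers do not normally call blk_register_queue() or
 * blk_unregister_queue() directly; device_add_disk() and del_gendisk()
 * invoke them while bringing a gendisk up and down.
 */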