// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}
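/*
 * Illustrative only (editorial note, not part of the upstream file): each
 * attribute defined here surfaces as a plain-text file under
 * /sys/block/<disk>/queue/. A typical round trip through
 * queue_var_show()/queue_var_store() looks like:
 *
 *   $ cat /sys/block/sda/queue/max_sectors_kb
 *   1280
 *   $ echo 512 > /sys/block/sda/queue/max_sectors_kb
 *
 * "sda" and "1280" are example values; stores return -EINVAL for values
 * the queue cannot honour.
 */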
static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
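/*
 * The QUEUE_SYSFS_BIT_FNS() macro below stamps out a show/store pair for a
 * single queue flag. As a rough sketch of what the preprocessor generates,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to approximately:
 *
 *   static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *   {
 *           int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *           return queue_var_show(!bit, page);   // simplified: neg == 1
 *   }
 *
 * plus the matching queue_nonrot_store(). The "neg" argument inverts the
 * value, so "rotational" reads back the inverse of the NONROT flag.
 */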
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}
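/*
 * Note on units (editorial comment): io_poll_delay is exchanged with user
 * space in microseconds but kept in q->poll_nsec in nanoseconds. The
 * sentinel BLK_MQ_POLL_CLASSIC (-1) selects classic polling; non-negative
 * values configure hybrid polling, with 0 letting the kernel estimate a
 * suitable sleep time.
 */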
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
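/*
 * Illustrative usage of wbt_lat_usec (editorial comment, values in
 * microseconds): writing -1 reverts to the device-type default latency
 * target via wbt_default_latency_nsec(), while writing 0 effectively
 * disables writeback throttling, since the value is the minimum read
 * latency target that wbt steers towards.
 */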
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
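/*
 * As an illustration of the QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() macros above,
 * QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") expands to roughly:
 *
 *   static struct queue_sysfs_entry queue_ra_entry = {
 *           .attr  = { .name = "read_ahead_kb", .mode = 0644 },
 *           .show  = queue_ra_show,
 *           .store = queue_ra_store,
 *   };
 *
 * i.e. a 0644 sysfs file named "read_ahead_kb" wired to the show/store
 * helpers defined earlier in this file.
 */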
"wbt_lat_usec"); 603 604 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 605 QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); 606 #endif 607 608 /* legacy alias for logical_block_size: */ 609 static struct queue_sysfs_entry queue_hw_sector_size_entry = { 610 .attr = {.name = "hw_sector_size", .mode = 0444 }, 611 .show = queue_logical_block_size_show, 612 }; 613 614 QUEUE_RW_ENTRY(queue_nonrot, "rotational"); 615 QUEUE_RW_ENTRY(queue_iostats, "iostats"); 616 QUEUE_RW_ENTRY(queue_random, "add_random"); 617 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes"); 618 619 static struct attribute *queue_attrs[] = { 620 &queue_requests_entry.attr, 621 &queue_ra_entry.attr, 622 &queue_max_hw_sectors_entry.attr, 623 &queue_max_sectors_entry.attr, 624 &queue_max_segments_entry.attr, 625 &queue_max_discard_segments_entry.attr, 626 &queue_max_integrity_segments_entry.attr, 627 &queue_max_segment_size_entry.attr, 628 &elv_iosched_entry.attr, 629 &queue_hw_sector_size_entry.attr, 630 &queue_logical_block_size_entry.attr, 631 &queue_physical_block_size_entry.attr, 632 &queue_chunk_sectors_entry.attr, 633 &queue_io_min_entry.attr, 634 &queue_io_opt_entry.attr, 635 &queue_discard_granularity_entry.attr, 636 &queue_discard_max_entry.attr, 637 &queue_discard_max_hw_entry.attr, 638 &queue_discard_zeroes_data_entry.attr, 639 &queue_write_same_max_entry.attr, 640 &queue_write_zeroes_max_entry.attr, 641 &queue_zone_append_max_entry.attr, 642 &queue_nonrot_entry.attr, 643 &queue_zoned_entry.attr, 644 &queue_nr_zones_entry.attr, 645 &queue_max_open_zones_entry.attr, 646 &queue_max_active_zones_entry.attr, 647 &queue_nomerges_entry.attr, 648 &queue_rq_affinity_entry.attr, 649 &queue_iostats_entry.attr, 650 &queue_stable_writes_entry.attr, 651 &queue_random_entry.attr, 652 &queue_poll_entry.attr, 653 &queue_wc_entry.attr, 654 &queue_fua_entry.attr, 655 &queue_dax_entry.attr, 656 &queue_wb_lat_entry.attr, 657 &queue_poll_delay_entry.attr, 658 &queue_io_timeout_entry.attr, 659 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 660 &blk_throtl_sample_time_entry.attr, 661 #endif 662 NULL, 663 }; 664 665 static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr, 666 int n) 667 { 668 struct request_queue *q = 669 container_of(kobj, struct request_queue, kobj); 670 671 if (attr == &queue_io_timeout_entry.attr && 672 (!q->mq_ops || !q->mq_ops->timeout)) 673 return 0; 674 675 if ((attr == &queue_max_open_zones_entry.attr || 676 attr == &queue_max_active_zones_entry.attr) && 677 !blk_queue_is_zoned(q)) 678 return 0; 679 680 return attr->mode; 681 } 682 683 static struct attribute_group queue_attr_group = { 684 .attrs = queue_attrs, 685 .is_visible = queue_attr_visible, 686 }; 687 688 689 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) 690 691 static ssize_t 692 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 693 { 694 struct queue_sysfs_entry *entry = to_queue(attr); 695 struct request_queue *q = 696 container_of(kobj, struct request_queue, kobj); 697 ssize_t res; 698 699 if (!entry->show) 700 return -EIO; 701 mutex_lock(&q->sysfs_lock); 702 res = entry->show(q, page); 703 mutex_unlock(&q->sysfs_lock); 704 return res; 705 } 706 707 static ssize_t 708 queue_attr_store(struct kobject *kobj, struct attribute *attr, 709 const char *page, size_t length) 710 { 711 struct queue_sysfs_entry *entry = to_queue(attr); 712 struct request_queue *q; 713 ssize_t res; 714 715 if (!entry->store) 716 return -EIO; 717 718 q = container_of(kobj, struct request_queue, kobj); 719 
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}
/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist that depend on the release of the request_queue being
 * synchronous; it must not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	blk_queue_update_readahead(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
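/*
 * Typical driver-side sequence (editorial sketch, not taken from this file):
 * a driver allocates a queue and a gendisk, and device_add_disk() ends up
 * calling blk_register_queue(), which creates /sys/block/<disk>/queue and
 * its attributes; teardown calls blk_unregister_queue() below before the
 * disk is released.
 */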
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}