// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}
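/*
 * Illustrative note (not itself kernel ABI documentation): each attribute
 * defined in this file surfaces as a file under /sys/block/<disk>/queue/,
 * so e.g. nr_requests can be inspected and tuned from userspace:
 *
 *      cat /sys/block/sda/queue/nr_requests
 *      echo 256 > /sys/block/sda/queue/nr_requests
 *
 * A store handler returns the number of bytes it consumed on success, or a
 * negative errno that the writing process sees as a failed write(2).
 */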
static ssize_t
queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb;

        if (!q->disk)
                return -EINVAL;
        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret;

        if (!q->disk)
                return -EINVAL;
        ret = queue_var_store(&ra_kb, page, count);
        if (ret < 0)
                return ret;
        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}
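/*
 * Unit conventions used by the helpers above and below, for reference:
 * limits are kept internally in 512-byte sectors, so "<< 9" converts
 * sectors to bytes, ">> 1" converts sectors to KiB, and shifts by
 * (PAGE_SHIFT - 10) convert between pages and KiB.
 */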
static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
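/*
 * Note on queue_discard_max_store() above: a written value is validated
 * against the discard granularity, converted from bytes to sectors, and
 * clamped to the hardware limit, so the effective setting may end up
 * lower than the value written.
 */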
static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        if (q->disk)
                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.virt_boundary_mask, page);
}
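/*
 * The macro below stamps out a show/store pair for a boolean queue flag.
 * For example, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands (roughly) to
 * queue_nonrot_show() and queue_nonrot_store() operating on
 * QUEUE_FLAG_NONROT, with the value negated because the flag means "non
 * rotational" while the sysfs file is named "rotational".
 */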
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)               \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_active_zones(q), page);
}
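/*
 * nomerges (below) is a three-state knob: 0 leaves all merging enabled,
 * 1 sets only QUEUE_FLAG_NOXMERGES (disabling the more expensive
 * lookup-based merges), and 2 sets QUEUE_FLAG_NOMERGES (disabling merging
 * entirely); the show side re-encodes the two flags as that 0/1/2 value.
 */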
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}
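/*
 * rq_affinity semantics, as encoded above: 0 turns completion steering
 * off, 1 (QUEUE_FLAG_SAME_COMP) completes a request on a CPU in the
 * submitting CPU's group, and 2 (additionally QUEUE_FLAG_SAME_FORCE)
 * forces completion onto the exact submitting CPU.
 */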
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == BLK_MQ_POLL_CLASSIC)
                q->poll_nsec = BLK_MQ_POLL_CLASSIC;
        else if (val >= 0)
                q->poll_nsec = val * 1000;
        else
                return -EINVAL;

        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
            !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        if (poll_on) {
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);
        } else {
                blk_mq_freeze_queue(q);
                blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
                blk_mq_unfreeze_queue(q);
        }

        return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}
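/*
 * wbt_lat_usec (below) is exposed in microseconds but stored in
 * nanoseconds, hence the div_u64(..., 1000) on the show side and the
 * "* 1000" on the store side; writing -1 re-selects the device-type
 * default latency target via wbt_default_latency_nsec().
 */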
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0444 },      \
        .show   = _prefix##_show,                       \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                  \
static struct queue_sysfs_entry _prefix##_entry = {     \
        .attr   = { .name = _name, .mode = 0644 },      \
        .show   = _prefix##_show,                       \
        .store  = _prefix##_store,                      \
};
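/*
 * Example expansion, for reference: QUEUE_RW_ENTRY(queue_ra,
 * "read_ahead_kb") defines queue_ra_entry with mode 0644, wired to
 * queue_ra_show() and queue_ra_store(), i.e. the "read_ahead_kb" file in
 * the queue directory.
 */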
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
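/*
 * The attribute array below is registered as a single sysfs group;
 * queue_attr_visible() hides entries that do not apply to a given queue,
 * e.g. the zone limits on non-zoned devices and io_timeout on queues
 * without a timeout handler.
 */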
static struct attribute *queue_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &elv_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        &queue_virt_boundary_mask_entry.attr,
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        if (attr == &queue_io_timeout_entry.attr &&
                (!q->mq_ops || !q->mq_ops->timeout))
                return 0;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
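/*
 * Generic dispatch: sysfs reads and writes on any queue attribute funnel
 * through the two wrappers below, which hold q->sysfs_lock around the
 * per-attribute handler.
 */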
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
        /*
         * Since the I/O scheduler exit code may access cgroup information,
         * perform I/O scheduler exit before disassociating from the block
         * cgroup controller.
         */
        if (q->elevator) {
                ioc_clear_queue(q);
                __elevator_exit(q, q->elevator);
        }

        /*
         * Remove all references to @q from the block cgroup controller before
         * restoring @q->queue_lock, so that restoring this pointer does not
         * cause e.g. blkcg_print_blkgs() to crash.
         */
        blkcg_exit_queue(q);
}
/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it must not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        might_sleep();

        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);

        blk_free_queue_stats(q->stats);

        if (queue_is_mq(q)) {
                struct blk_mq_hw_ctx *hctx;
                int i;

                cancel_delayed_work_sync(&q->requeue_work);

                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        }

        blk_exit_queue(q);

        blk_queue_free_zone_bitmaps(q);

        if (queue_is_mq(q))
                blk_mq_release(q);

        blk_trace_shutdown(q);
        mutex_lock(&q->debugfs_mutex);
        debugfs_remove_recursive(q->debugfs_dir);
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q))
                blk_mq_debugfs_unregister(q);

        bioset_exit(&q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_release_queue,
};
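/*
 * blk_queue_ktype ties the queue's kobject to blk_release_queue(), so the
 * teardown above runs exactly once, when the last reference taken via
 * blk_get_queue() is dropped.
 */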
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        WARN_ONCE(blk_queue_registered(q),
                  "%s is registering an already registered queue\n",
                  kobject_name(&dev->kobj));

        disk_update_readahead(disk);

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        mutex_lock(&q->sysfs_dir_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        ret = sysfs_create_group(&q->kobj, &queue_attr_group);
        if (ret) {
                blk_trace_remove_sysfs(dev);
                kobject_del(&q->kobj);
                kobject_put(&dev->kobj);
                goto unlock;
        }

        mutex_lock(&q->debugfs_mutex);
        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        mutex_unlock(&q->debugfs_mutex);

        if (queue_is_mq(q)) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }

        mutex_lock(&q->sysfs_lock);
        if (q->elevator) {
                ret = elv_register_queue(q, false);
                if (ret) {
                        mutex_unlock(&q->sysfs_lock);
                        mutex_unlock(&q->sysfs_dir_lock);
                        kobject_del(&q->kobj);
                        blk_trace_remove_sysfs(dev);
                        kobject_put(&dev->kobj);
                        return ret;
                }
        }

        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
        wbt_enable_default(q);
        blk_throtl_register_queue(q);

        /* Now everything is ready; send out the KOBJ_ADD uevent. */
        kobject_uevent(&q->kobj, KOBJ_ADD);
        if (q->elevator)
                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
        mutex_unlock(&q->sysfs_lock);

        ret = 0;
unlock:
        mutex_unlock(&q->sysfs_dir_lock);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        return ret;
}
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!blk_queue_registered(q))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);
        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
        mutex_unlock(&q->sysfs_lock);

        mutex_lock(&q->sysfs_dir_lock);
        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_unregister_dev(disk_to_dev(disk), q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));

        mutex_lock(&q->sysfs_lock);
        if (q->elevator)
                elv_unregister_queue(q);
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);

        kobject_put(&disk_to_dev(disk)->kobj);
}