// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
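/*
 * read_ahead_kb is exposed in kilobytes but stored in pages
 * (bdi->ra_pages).  A page is 2^(PAGE_SHIFT - 10) KB, so the handlers
 * below shift by (PAGE_SHIFT - 10) to convert between the two units.
 */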
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
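/*
 * discard_max_bytes is writable: the value is parsed in bytes, must be
 * aligned to discard_granularity, is converted to 512-byte sectors, and
 * is capped at the hardware limit reported by discard_max_hw_bytes.
 */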
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
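/*
 * max_sectors_kb is writable, bounded below by the page size and above
 * by the hardware limit (max_hw_sectors_kb, itself clamped to the
 * device limit max_dev_sectors).  bdi->io_pages is kept in sync so
 * readahead is sized consistently with the new request size limit.
 */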
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}
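/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair that maps a sysfs
 * attribute onto a single queue flag.  For example,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to queue_nonrot_show()
 * and queue_nonrot_store() operating on QUEUE_FLAG_NONROT; the trailing
 * "1" inverts the value, since the "rotational" attribute is the
 * negation of the NONROT flag.
 */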
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}
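/*
 * nomerges folds two flags into one value: 0 allows all merging, 1 sets
 * NOXMERGES (only cheap one-hit merges, no extended lookups) and 2 sets
 * NOMERGES (no merging at all).
 */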
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
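/*
 * rq_affinity: 0 performs no completion steering, 1 (SAME_COMP)
 * completes a request on a CPU in the same group as the submitter, and
 * 2 (SAME_FORCE) forces completion onto the exact submitting CPU.  The
 * show side reconstructs the value as set << force, i.e. 0, 1 or 2.
 */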
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
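/*
 * wbt_lat_usec is the writeback throttling latency target.  Writing -1
 * restores the device-type default; other non-negative values are taken
 * in microseconds and stored internally in nanoseconds.  The store path
 * freezes and quiesces the queue because changing the target may enable
 * or disable wbt outright.
 */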
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
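/*
 * QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() tie the handlers above to sysfs
 * attribute names.  For example, QUEUE_RW_ENTRY(queue_requests,
 * "nr_requests") defines queue_requests_entry with mode 0644 and wires
 * .show/.store to queue_requests_show()/queue_requests_store().
 */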
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr = { .name = _name, .mode = 0444 },	\
	.show = _prefix##_show,				\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr = { .name = _name, .mode = 0644 },	\
	.show = _prefix##_show,				\
	.store = _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};
QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};
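/*
 * Not every attribute applies to every queue: io_timeout is only
 * meaningful when the driver implements a timeout handler, and the zone
 * limits only apply to zoned block devices.  queue_attr_visible() hides
 * the attributes that do not apply.
 */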
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
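/*
 * Both dispatchers above take q->sysfs_lock, so the individual
 * show/store handlers run serialized against each other and against the
 * parts of queue (un)registration that hold the same lock.
 */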
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock, so that restoring this pointer cannot
	 * cause e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous; it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);
	kfree(q->poll_stat);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
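/*
 * blk_queue_ktype plugs the dispatchers and the release handler into
 * the queue's embedded kobject: once the final reference to q->kobj is
 * dropped, blk_release_queue() runs and the queue memory is freed via
 * RCU.
 */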
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);

	ret = disk_register_independent_access_ranges(disk, NULL);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(dev);
	kobject_put(&dev->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}