// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
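/*
 * Note: bdi->ra_pages is stored in units of PAGE_SIZE pages, while the
 * read_ahead_kb attribute below is exposed in kilobytes; hence the
 * PAGE_SHIFT - 10 shifts converting between the two in show and store.
 */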
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}
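/*
 * chunk_sectors is the boundary (in 512B sectors) that single bios may
 * not cross; for zoned block devices it is the zone size, and zero
 * means no boundary.
 */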
static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}
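/*
 * Like the discard and write_same limits above, the *_max_bytes
 * attributes below expose sector-based queue limits scaled to bytes,
 * hence the << 9 (and << SECTOR_SHIFT) conversions.
 */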
static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);
	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};
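/*
 * The four read-write entries below are generated by the
 * QUEUE_SYSFS_BIT_FNS() helpers above; note that "rotational" is the
 * negated view of QUEUE_FLAG_NONROT (neg == 1 in the instantiation).
 */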
QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);

	ret = disk_register_independent_access_ranges(disk, NULL);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(dev);
	kobject_put(&dev->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}