// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
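
/*
 * Each show/store pair in this file backs a plain text file under
 * /sys/block/<disk>/queue/.  A userspace round-trip looks like this
 * (device name and value are hypothetical):
 *
 *	# cat /sys/block/sda/queue/nr_requests
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Input that the parsers above reject is reported back to the writer as
 * -EINVAL.
 */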

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}
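
/*
 * minimum_io_size (io_min, above) and optimal_io_size (io_opt, below) are
 * reported straight from the queue limits; both values are in bytes.
 */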

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
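
/*
 * max_sectors_kb is exchanged with userspace in kibibytes while the queue
 * limits are kept in 512-byte sectors, hence the >> 1 and << 1 conversions
 * in the two helpers below (two sectors per KB).
 */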

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
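
/*
 * QUEUE_SYSFS_BIT_FNS() stamps out a queue_show_<name>()/queue_store_<name>()
 * pair around a single queue flag.  With neg == 1 the exported value is
 * inverted, which is how "rotational" reads 1 for a device whose
 * QUEUE_FLAG_NONROT bit is clear.
 */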

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
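
/*
 * The nomerges attribute above packs two flags into one value: writing 2
 * sets QUEUE_FLAG_NOMERGES (no request merging at all), 1 sets only
 * QUEUE_FLAG_NOXMERGES (merging without the extended merge lookups), and 0
 * clears both, matching the (nomerges << 1) | noxmerges encoding used by
 * queue_nomerges_show().
 */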

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
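
/*
 * io_poll can only be turned on when the tag set provides a dedicated poll
 * queue map; that is what the HCTX_TYPE_POLL check in queue_poll_store()
 * below enforces.
 */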

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
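
/*
 * Attribute descriptors: each entry below ties one file name under
 * /sys/block/<disk>/queue/ to the show/store handlers above.  Mode 0644
 * marks a writable attribute, 0444 a read-only one.
 */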

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
	.attr = {.name = "io_timeout", .mode = 0644 },
	.show = queue_io_timeout_show,
	.store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif
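
/*
 * All attributes declared above are collected into a single group so they
 * can be created with one sysfs_create_group() call, with per-queue
 * filtering done by queue_attr_visible() below.
 */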

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
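
/*
 * Generic sysfs_ops glue: to_queue() recovers the queue_sysfs_entry from a
 * raw attribute, and the wrappers below serialize every show/store call
 * under q->sysfs_lock, failing with -ENOENT once the queue is dying.
 */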

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}
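
/*
 * Freeing through call_rcu() (see __blk_release_queue() below) lets code
 * that still holds an RCU-protected reference to the queue dereference it
 * safely until a grace period has passed.
 */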

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock, so that restoring this pointer cannot
	 * cause e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}

/**
 * __blk_release_queue - release a request queue
 * @work: pointer to the release_work member of the request queue to be
 *	released
 *
 * Description:
 *     This function is called when a block device is being unregistered. The
 *     process of releasing a request queue starts with blk_cleanup_queue(),
 *     which sets the appropriate flags and then calls blk_put_queue(), which
 *     decrements the reference counter of the request queue. Once the
 *     reference counter of the request queue reaches zero, blk_release_queue()
 *     is called to release all allocated resources of the request queue.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	bool has_elevator = false;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}
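
	/*
	 * Locking note: q->sysfs_dir_lock, held across this function,
	 * serializes creation and removal of the queue directory itself;
	 * q->sysfs_lock, taken below, protects the REGISTERED flag and the
	 * attribute state.
	 */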

	/*
	 * QUEUE_FLAG_REGISTERED isn't set yet, so an elevator switch
	 * can't happen in the meantime.
	 */
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
		has_elevator = true;
	}

	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (has_elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	bool has_elevator;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	has_elevator = !!q->elevator;
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	if (has_elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}