/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}
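/*
 * read_ahead_kb is exposed to user space in kilobytes, while
 * bdi->ra_pages is kept in PAGE_SIZE units, hence the shifts by
 * (PAGE_SHIFT - 10) in the two helpers below.
 */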
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}
static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{

	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
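/*
 * rq_affinity values as handled by the store below: 0 clears both flags
 * (no completion affinity), 1 sets QUEUE_FLAG_SAME_COMP (complete on a CPU
 * in the submitter's group), 2 additionally sets QUEUE_FLAG_SAME_FORCE
 * (complete on the exact submitting CPU).
 */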
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;

		rwb = q->rq_wb;
		if (!rwb)
			return -EINVAL;
	}

	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
			pre, (long long) stat->nr_samples,
			(long long) stat->mean, (long long) stat->min,
			(long long) stat->max);
}

static ssize_t queue_stats_show(struct request_queue *q, char *page)
{
	struct blk_rq_stat stat[2];
	ssize_t ret;

	blk_queue_stat_get(q, stat);

	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};
static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};
static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};
static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_stats_entry = {
	.attr = {.name = "stats", .mode = S_IRUGO },
	.show = queue_stats_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_stats_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	wbt_exit(q);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
	if (q->mq_ops)
		return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
	if (q->request_fn)
		return;
#endif

	/*
	 * If this fails, we don't get throttling
	 */
	wbt_init(q);
}

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	blk_wb_init(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}