18324aa91SJens Axboe /* 28324aa91SJens Axboe * Functions related to sysfs handling 38324aa91SJens Axboe */ 48324aa91SJens Axboe #include <linux/kernel.h> 55a0e3ad6STejun Heo #include <linux/slab.h> 68324aa91SJens Axboe #include <linux/module.h> 78324aa91SJens Axboe #include <linux/bio.h> 88324aa91SJens Axboe #include <linux/blkdev.h> 966114cadSTejun Heo #include <linux/backing-dev.h> 108324aa91SJens Axboe #include <linux/blktrace_api.h> 11320ae51fSJens Axboe #include <linux/blk-mq.h> 12eea8f41cSTejun Heo #include <linux/blk-cgroup.h> 138324aa91SJens Axboe 148324aa91SJens Axboe #include "blk.h" 153edcc0ceSMing Lei #include "blk-mq.h" 16d173a251SOmar Sandoval #include "blk-mq-debugfs.h" 1787760e5eSJens Axboe #include "blk-wbt.h" 188324aa91SJens Axboe 198324aa91SJens Axboe struct queue_sysfs_entry { 208324aa91SJens Axboe struct attribute attr; 218324aa91SJens Axboe ssize_t (*show)(struct request_queue *, char *); 228324aa91SJens Axboe ssize_t (*store)(struct request_queue *, const char *, size_t); 238324aa91SJens Axboe }; 248324aa91SJens Axboe 258324aa91SJens Axboe static ssize_t 269cb308ceSXiaotian Feng queue_var_show(unsigned long var, char *page) 278324aa91SJens Axboe { 289cb308ceSXiaotian Feng return sprintf(page, "%lu\n", var); 298324aa91SJens Axboe } 308324aa91SJens Axboe 318324aa91SJens Axboe static ssize_t 328324aa91SJens Axboe queue_var_store(unsigned long *var, const char *page, size_t count) 338324aa91SJens Axboe { 34b1f3b64dSDave Reisner int err; 35b1f3b64dSDave Reisner unsigned long v; 368324aa91SJens Axboe 37ed751e68SJingoo Han err = kstrtoul(page, 10, &v); 38b1f3b64dSDave Reisner if (err || v > UINT_MAX) 39b1f3b64dSDave Reisner return -EINVAL; 40b1f3b64dSDave Reisner 41b1f3b64dSDave Reisner *var = v; 42b1f3b64dSDave Reisner 438324aa91SJens Axboe return count; 448324aa91SJens Axboe } 458324aa91SJens Axboe 4680e091d1SJens Axboe static ssize_t queue_var_store64(s64 *var, const char *page) 4787760e5eSJens Axboe { 4887760e5eSJens Axboe int err; 
4980e091d1SJens Axboe s64 v; 5087760e5eSJens Axboe 5180e091d1SJens Axboe err = kstrtos64(page, 10, &v); 5287760e5eSJens Axboe if (err < 0) 5387760e5eSJens Axboe return err; 5487760e5eSJens Axboe 5587760e5eSJens Axboe *var = v; 5687760e5eSJens Axboe return 0; 5787760e5eSJens Axboe } 5887760e5eSJens Axboe 598324aa91SJens Axboe static ssize_t queue_requests_show(struct request_queue *q, char *page) 608324aa91SJens Axboe { 618324aa91SJens Axboe return queue_var_show(q->nr_requests, (page)); 628324aa91SJens Axboe } 638324aa91SJens Axboe 648324aa91SJens Axboe static ssize_t 658324aa91SJens Axboe queue_requests_store(struct request_queue *q, const char *page, size_t count) 668324aa91SJens Axboe { 678324aa91SJens Axboe unsigned long nr; 68e3a2b3f9SJens Axboe int ret, err; 69b8a9ae77SJens Axboe 70e3a2b3f9SJens Axboe if (!q->request_fn && !q->mq_ops) 71b8a9ae77SJens Axboe return -EINVAL; 72b8a9ae77SJens Axboe 73b8a9ae77SJens Axboe ret = queue_var_store(&nr, page, count); 74b1f3b64dSDave Reisner if (ret < 0) 75b1f3b64dSDave Reisner return ret; 76b1f3b64dSDave Reisner 778324aa91SJens Axboe if (nr < BLKDEV_MIN_RQ) 788324aa91SJens Axboe nr = BLKDEV_MIN_RQ; 798324aa91SJens Axboe 80e3a2b3f9SJens Axboe if (q->request_fn) 81e3a2b3f9SJens Axboe err = blk_update_nr_requests(q, nr); 82e3a2b3f9SJens Axboe else 83e3a2b3f9SJens Axboe err = blk_mq_update_nr_requests(q, nr); 848324aa91SJens Axboe 85e3a2b3f9SJens Axboe if (err) 86e3a2b3f9SJens Axboe return err; 87a051661cSTejun Heo 888324aa91SJens Axboe return ret; 898324aa91SJens Axboe } 908324aa91SJens Axboe 918324aa91SJens Axboe static ssize_t queue_ra_show(struct request_queue *q, char *page) 928324aa91SJens Axboe { 93dc3b17ccSJan Kara unsigned long ra_kb = q->backing_dev_info->ra_pages << 9409cbfeafSKirill A. 
Shutemov (PAGE_SHIFT - 10); 958324aa91SJens Axboe 968324aa91SJens Axboe return queue_var_show(ra_kb, (page)); 978324aa91SJens Axboe } 988324aa91SJens Axboe 998324aa91SJens Axboe static ssize_t 1008324aa91SJens Axboe queue_ra_store(struct request_queue *q, const char *page, size_t count) 1018324aa91SJens Axboe { 1028324aa91SJens Axboe unsigned long ra_kb; 1038324aa91SJens Axboe ssize_t ret = queue_var_store(&ra_kb, page, count); 1048324aa91SJens Axboe 105b1f3b64dSDave Reisner if (ret < 0) 106b1f3b64dSDave Reisner return ret; 107b1f3b64dSDave Reisner 108dc3b17ccSJan Kara q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); 1098324aa91SJens Axboe 1108324aa91SJens Axboe return ret; 1118324aa91SJens Axboe } 1128324aa91SJens Axboe 1138324aa91SJens Axboe static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) 1148324aa91SJens Axboe { 115ae03bf63SMartin K. Petersen int max_sectors_kb = queue_max_sectors(q) >> 1; 1168324aa91SJens Axboe 1178324aa91SJens Axboe return queue_var_show(max_sectors_kb, (page)); 1188324aa91SJens Axboe } 1198324aa91SJens Axboe 120c77a5710SMartin K. Petersen static ssize_t queue_max_segments_show(struct request_queue *q, char *page) 121c77a5710SMartin K. Petersen { 122c77a5710SMartin K. Petersen return queue_var_show(queue_max_segments(q), (page)); 123c77a5710SMartin K. Petersen } 124c77a5710SMartin K. Petersen 1251e739730SChristoph Hellwig static ssize_t queue_max_discard_segments_show(struct request_queue *q, 1261e739730SChristoph Hellwig char *page) 1271e739730SChristoph Hellwig { 1281e739730SChristoph Hellwig return queue_var_show(queue_max_discard_segments(q), (page)); 1291e739730SChristoph Hellwig } 1301e739730SChristoph Hellwig 13113f05c8dSMartin K. Petersen static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) 13213f05c8dSMartin K. Petersen { 13313f05c8dSMartin K. Petersen return queue_var_show(q->limits.max_integrity_segments, (page)); 13413f05c8dSMartin K. 
Petersen } 13513f05c8dSMartin K. Petersen 136c77a5710SMartin K. Petersen static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) 137c77a5710SMartin K. Petersen { 138e692cb66SMartin K. Petersen if (blk_queue_cluster(q)) 139c77a5710SMartin K. Petersen return queue_var_show(queue_max_segment_size(q), (page)); 140c77a5710SMartin K. Petersen 14109cbfeafSKirill A. Shutemov return queue_var_show(PAGE_SIZE, (page)); 142c77a5710SMartin K. Petersen } 143c77a5710SMartin K. Petersen 144e1defc4fSMartin K. Petersen static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 145e68b903cSMartin K. Petersen { 146e1defc4fSMartin K. Petersen return queue_var_show(queue_logical_block_size(q), page); 147e68b903cSMartin K. Petersen } 148e68b903cSMartin K. Petersen 149c72758f3SMartin K. Petersen static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) 150c72758f3SMartin K. Petersen { 151c72758f3SMartin K. Petersen return queue_var_show(queue_physical_block_size(q), page); 152c72758f3SMartin K. Petersen } 153c72758f3SMartin K. Petersen 15487caf97cSHannes Reinecke static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) 15587caf97cSHannes Reinecke { 15687caf97cSHannes Reinecke return queue_var_show(q->limits.chunk_sectors, page); 15787caf97cSHannes Reinecke } 15887caf97cSHannes Reinecke 159c72758f3SMartin K. Petersen static ssize_t queue_io_min_show(struct request_queue *q, char *page) 160c72758f3SMartin K. Petersen { 161c72758f3SMartin K. Petersen return queue_var_show(queue_io_min(q), page); 162c72758f3SMartin K. Petersen } 163c72758f3SMartin K. Petersen 164c72758f3SMartin K. Petersen static ssize_t queue_io_opt_show(struct request_queue *q, char *page) 165c72758f3SMartin K. Petersen { 166c72758f3SMartin K. Petersen return queue_var_show(queue_io_opt(q), page); 1678324aa91SJens Axboe } 1688324aa91SJens Axboe 16986b37281SMartin K. 
Petersen static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) 17086b37281SMartin K. Petersen { 17186b37281SMartin K. Petersen return queue_var_show(q->limits.discard_granularity, page); 17286b37281SMartin K. Petersen } 17386b37281SMartin K. Petersen 1740034af03SJens Axboe static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) 1750034af03SJens Axboe { 1760034af03SJens Axboe 17718f922d0SAlan return sprintf(page, "%llu\n", 17818f922d0SAlan (unsigned long long)q->limits.max_hw_discard_sectors << 9); 1790034af03SJens Axboe } 1800034af03SJens Axboe 18186b37281SMartin K. Petersen static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 18286b37281SMartin K. Petersen { 183a934a00aSMartin K. Petersen return sprintf(page, "%llu\n", 184a934a00aSMartin K. Petersen (unsigned long long)q->limits.max_discard_sectors << 9); 18586b37281SMartin K. Petersen } 18686b37281SMartin K. Petersen 1870034af03SJens Axboe static ssize_t queue_discard_max_store(struct request_queue *q, 1880034af03SJens Axboe const char *page, size_t count) 1890034af03SJens Axboe { 1900034af03SJens Axboe unsigned long max_discard; 1910034af03SJens Axboe ssize_t ret = queue_var_store(&max_discard, page, count); 1920034af03SJens Axboe 1930034af03SJens Axboe if (ret < 0) 1940034af03SJens Axboe return ret; 1950034af03SJens Axboe 1960034af03SJens Axboe if (max_discard & (q->limits.discard_granularity - 1)) 1970034af03SJens Axboe return -EINVAL; 1980034af03SJens Axboe 1990034af03SJens Axboe max_discard >>= 9; 2000034af03SJens Axboe if (max_discard > UINT_MAX) 2010034af03SJens Axboe return -EINVAL; 2020034af03SJens Axboe 2030034af03SJens Axboe if (max_discard > q->limits.max_hw_discard_sectors) 2040034af03SJens Axboe max_discard = q->limits.max_hw_discard_sectors; 2050034af03SJens Axboe 2060034af03SJens Axboe q->limits.max_discard_sectors = max_discard; 2070034af03SJens Axboe return ret; 2080034af03SJens Axboe } 2090034af03SJens Axboe 
/*
 * Discards are never guaranteed to zero data here, so this attribute is
 * hard-wired to 0.  NOTE(review): presumably retained only for sysfs
 * ABI compatibility — confirm before removing.
 */
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

/* Stored in 512-byte sectors, exported in bytes ("<< 9"). */
static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

/*
 * Set max_sectors, in kilobytes.  The value is bounded below by one
 * page and above by the hardware (and, when set, device) limit; the
 * queue lock is held while the limit and the backing_dev_info I/O page
 * count are updated together.
 */
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	/* honour max_dev_sectors as an additional cap when non-zero */
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

/*
 * Generate a show/store pair for a boolean queue flag.  @neg inverts
 * the polarity exposed to user space (e.g. "rotational" is the inverse
 * of QUEUE_FLAG_NONROT).  The store side flips the flag under the
 * queue lock.  (Comments are kept outside the macro body: a "//"
 * comment before a line-continuation backslash would swallow it.)
 */
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

/* Zoned model as a string: "none", "host-aware" or "host-managed". */
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

/*
 * "nomerges" packs two flags into one value: 0 = merging enabled,
 * 1 = extended merges disabled (NOXMERGES), 2 = all merging disabled
 * (NOMERGES).
 */
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	/* clear both flags, then set the one matching the new level */
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

/*
 * "rq_affinity": reports 2 when both SAME_COMP and SAME_FORCE are set,
 * 1 when only SAME_COMP is set, 0 otherwise ("set << force" yields
 * exactly that mapping for the two booleans).
 */
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

/*
 * Writable only on SMP builds; otherwise every write fails -EINVAL.
 * 2 sets both flags, 1 sets SAME_COMP only, 0 clears both.
 */
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	/* values > 2 are accepted but leave the flags unchanged */
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

/*
 * Poll delay is stored in nanoseconds but exposed in microseconds;
 * -1 is passed through as a sentinel (NOTE(review): its meaning is
 * defined by the polling code that reads poll_nsec — confirm there).
 */
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	/* only meaningful for blk-mq drivers implementing ->poll() */
	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;	/* usec -> nsec */

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

/* Toggle QUEUE_FLAG_POLL under the queue lock; requires ->poll(). */
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

/* Writeback throttling minimum latency, shown in microseconds. */
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

/*
 * Set the wbt minimum latency (in usec).  -1 restores the default for
 * this queue; wbt is lazily initialized on first write if not yet set
 * up.  A manual write switches enable_state off the DEFAULT value so
 * it is recorded as user-configured.
 */
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;

		rwb = q->rq_wb;
		if (!rwb)
			return -EINVAL;
	}

	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;	/* usec -> nsec */

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

/* "write_cache": "write back" if QUEUE_FLAG_WC is set, else "write through". */
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

/*
 * Accepts "write back", "write through" or "none" ("none" is treated
 * like "write through").  Matching is prefix-based via strncmp, so a
 * trailing newline from sysfs is tolerated.
 */
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

/* 1 if the queue supports DAX (direct access), 0 otherwise. */
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

/*
 * Attribute descriptors backing /sys/block/<dev>/queue/<name>.
 * Read-only attributes set only .show; writable ones also set .store.
 */
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

/* legacy alias: "hw_sector_size" reports the logical block size */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

/* "rotational" exposes the inverse of QUEUE_FLAG_NONROT */
static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

/* show/store pairs below are generated by QUEUE_SYSFS_BIT_FNS above */
static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

/* Default attribute set registered for every request_queue kobject. */
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

/* Map a generic sysfs attribute back to its queue_sysfs_entry. */
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

/*
 * Generic ->show dispatch for queue attributes: serializes against
 * q->sysfs_lock and refuses attributes of a dying queue.
 */
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
mutex_unlock(&q->sysfs_lock); 7478324aa91SJens Axboe return res; 7488324aa91SJens Axboe } 7498324aa91SJens Axboe 7508324aa91SJens Axboe static ssize_t 7518324aa91SJens Axboe queue_attr_store(struct kobject *kobj, struct attribute *attr, 7528324aa91SJens Axboe const char *page, size_t length) 7538324aa91SJens Axboe { 7548324aa91SJens Axboe struct queue_sysfs_entry *entry = to_queue(attr); 7556728cb0eSJens Axboe struct request_queue *q; 7568324aa91SJens Axboe ssize_t res; 7578324aa91SJens Axboe 7588324aa91SJens Axboe if (!entry->store) 7598324aa91SJens Axboe return -EIO; 7606728cb0eSJens Axboe 7616728cb0eSJens Axboe q = container_of(kobj, struct request_queue, kobj); 7628324aa91SJens Axboe mutex_lock(&q->sysfs_lock); 7633f3299d5SBart Van Assche if (blk_queue_dying(q)) { 7648324aa91SJens Axboe mutex_unlock(&q->sysfs_lock); 7658324aa91SJens Axboe return -ENOENT; 7668324aa91SJens Axboe } 7678324aa91SJens Axboe res = entry->store(q, page, length); 7688324aa91SJens Axboe mutex_unlock(&q->sysfs_lock); 7698324aa91SJens Axboe return res; 7708324aa91SJens Axboe } 7718324aa91SJens Axboe 772548bc8e1STejun Heo static void blk_free_queue_rcu(struct rcu_head *rcu_head) 773548bc8e1STejun Heo { 774548bc8e1STejun Heo struct request_queue *q = container_of(rcu_head, struct request_queue, 775548bc8e1STejun Heo rcu_head); 776548bc8e1STejun Heo kmem_cache_free(blk_requestq_cachep, q); 777548bc8e1STejun Heo } 778548bc8e1STejun Heo 7798324aa91SJens Axboe /** 780dc9edc44SBart Van Assche * __blk_release_queue - release a request queue when it is no longer needed 781dc9edc44SBart Van Assche * @work: pointer to the release_work member of the request queue to be released 7828324aa91SJens Axboe * 7838324aa91SJens Axboe * Description: 784dc9edc44SBart Van Assche * blk_release_queue is the counterpart of blk_init_queue(). It should be 785dc9edc44SBart Van Assche * called when a request queue is being released; typically when a block 786dc9edc44SBart Van Assche * device is being de-registered. 
Its primary task it to free the queue 787dc9edc44SBart Van Assche * itself. 7888324aa91SJens Axboe * 789dc9edc44SBart Van Assche * Notes: 79045a9c9d9SBart Van Assche * The low level driver must have finished any outstanding requests first 79145a9c9d9SBart Van Assche * via blk_cleanup_queue(). 792dc9edc44SBart Van Assche * 793dc9edc44SBart Van Assche * Although blk_release_queue() may be called with preemption disabled, 794dc9edc44SBart Van Assche * __blk_release_queue() may sleep. 795dc9edc44SBart Van Assche */ 796dc9edc44SBart Van Assche static void __blk_release_queue(struct work_struct *work) 7978324aa91SJens Axboe { 798dc9edc44SBart Van Assche struct request_queue *q = container_of(work, typeof(*q), release_work); 7998324aa91SJens Axboe 80034dbad5dSOmar Sandoval if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) 80134dbad5dSOmar Sandoval blk_stat_remove_callback(q, q->poll_cb); 80234dbad5dSOmar Sandoval blk_stat_free_callback(q->poll_cb); 803d03f6cdcSJan Kara bdi_put(q->backing_dev_info); 804e8989faeSTejun Heo blkcg_exit_queue(q); 805e8989faeSTejun Heo 8067e5a8794STejun Heo if (q->elevator) { 8077e5a8794STejun Heo ioc_clear_queue(q); 80854d5329dSOmar Sandoval elevator_exit(q, q->elevator); 8097e5a8794STejun Heo } 810777eb1bfSHannes Reinecke 81134dbad5dSOmar Sandoval blk_free_queue_stats(q->stats); 81234dbad5dSOmar Sandoval 813b425e504SBart Van Assche blk_exit_rl(q, &q->root_rl); 8148324aa91SJens Axboe 8158324aa91SJens Axboe if (q->queue_tags) 8168324aa91SJens Axboe __blk_queue_free_tags(q); 8178324aa91SJens Axboe 8186d247d7fSChristoph Hellwig if (!q->mq_ops) { 8196d247d7fSChristoph Hellwig if (q->exit_rq_fn) 8206d247d7fSChristoph Hellwig q->exit_rq_fn(q, q->fq->flush_rq); 821f70ced09SMing Lei blk_free_flush_queue(q->fq); 8226d247d7fSChristoph Hellwig } else { 823e09aae7eSMing Lei blk_mq_release(q); 8246d247d7fSChristoph Hellwig } 82518741986SChristoph Hellwig 8268324aa91SJens Axboe blk_trace_shutdown(q); 8278324aa91SJens Axboe 82862ebce16SOmar Sandoval if 
(q->mq_ops) 82962ebce16SOmar Sandoval blk_mq_debugfs_unregister(q); 83062ebce16SOmar Sandoval 83154efd50bSKent Overstreet if (q->bio_split) 83254efd50bSKent Overstreet bioset_free(q->bio_split); 83354efd50bSKent Overstreet 834a73f730dSTejun Heo ida_simple_remove(&blk_queue_ida, q->id); 835548bc8e1STejun Heo call_rcu(&q->rcu_head, blk_free_queue_rcu); 8368324aa91SJens Axboe } 8378324aa91SJens Axboe 838dc9edc44SBart Van Assche static void blk_release_queue(struct kobject *kobj) 839dc9edc44SBart Van Assche { 840dc9edc44SBart Van Assche struct request_queue *q = 841dc9edc44SBart Van Assche container_of(kobj, struct request_queue, kobj); 842dc9edc44SBart Van Assche 843dc9edc44SBart Van Assche INIT_WORK(&q->release_work, __blk_release_queue); 844dc9edc44SBart Van Assche schedule_work(&q->release_work); 845dc9edc44SBart Van Assche } 846dc9edc44SBart Van Assche 84752cf25d0SEmese Revfy static const struct sysfs_ops queue_sysfs_ops = { 8488324aa91SJens Axboe .show = queue_attr_show, 8498324aa91SJens Axboe .store = queue_attr_store, 8508324aa91SJens Axboe }; 8518324aa91SJens Axboe 8528324aa91SJens Axboe struct kobj_type blk_queue_ktype = { 8538324aa91SJens Axboe .sysfs_ops = &queue_sysfs_ops, 8548324aa91SJens Axboe .default_attrs = default_attrs, 8558324aa91SJens Axboe .release = blk_release_queue, 8568324aa91SJens Axboe }; 8578324aa91SJens Axboe 8588324aa91SJens Axboe int blk_register_queue(struct gendisk *disk) 8598324aa91SJens Axboe { 8608324aa91SJens Axboe int ret; 8611d54ad6dSLi Zefan struct device *dev = disk_to_dev(disk); 8628324aa91SJens Axboe struct request_queue *q = disk->queue; 8638324aa91SJens Axboe 864fb199746SAkinobu Mita if (WARN_ON(!q)) 8658324aa91SJens Axboe return -ENXIO; 8668324aa91SJens Axboe 867334335d2SOmar Sandoval WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags), 868334335d2SOmar Sandoval "%s is registering an already registered queue\n", 869334335d2SOmar Sandoval kobject_name(&dev->kobj)); 870334335d2SOmar Sandoval 
queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q); 871334335d2SOmar Sandoval 872749fefe6STejun Heo /* 87317497acbSTejun Heo * SCSI probing may synchronously create and destroy a lot of 87417497acbSTejun Heo * request_queues for non-existent devices. Shutting down a fully 87517497acbSTejun Heo * functional queue takes measureable wallclock time as RCU grace 87617497acbSTejun Heo * periods are involved. To avoid excessive latency in these 87717497acbSTejun Heo * cases, a request_queue starts out in a degraded mode which is 87817497acbSTejun Heo * faster to shut down and is made fully functional here as 87917497acbSTejun Heo * request_queues for non-existent devices never get registered. 880749fefe6STejun Heo */ 881df35c7c9SAlan Stern if (!blk_queue_init_done(q)) { 882320ae51fSJens Axboe queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); 8833ef28e83SDan Williams percpu_ref_switch_to_percpu(&q->q_usage_counter); 884776687bcSTejun Heo blk_queue_bypass_end(q); 885df35c7c9SAlan Stern } 886749fefe6STejun Heo 8871d54ad6dSLi Zefan ret = blk_trace_init_sysfs(dev); 8881d54ad6dSLi Zefan if (ret) 8891d54ad6dSLi Zefan return ret; 8901d54ad6dSLi Zefan 891b410aff2STahsin Erdogan /* Prevent changes through sysfs until registration is completed. 
*/ 892b410aff2STahsin Erdogan mutex_lock(&q->sysfs_lock); 893b410aff2STahsin Erdogan 894c9059598SLinus Torvalds ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); 895ed5302d3SLiu Yuan if (ret < 0) { 896ed5302d3SLiu Yuan blk_trace_remove_sysfs(dev); 897b410aff2STahsin Erdogan goto unlock; 898ed5302d3SLiu Yuan } 8998324aa91SJens Axboe 900a8ecdd71SBart Van Assche if (q->mq_ops) { 9012d0364c8SBart Van Assche __blk_mq_register_dev(dev, q); 9029c1051aaSOmar Sandoval blk_mq_debugfs_register(q); 903a8ecdd71SBart Van Assche } 9049c1051aaSOmar Sandoval 9058324aa91SJens Axboe kobject_uevent(&q->kobj, KOBJ_ADD); 9068324aa91SJens Axboe 9078330cdb0SJan Kara wbt_enable_default(q); 90887760e5eSJens Axboe 909d61fcfa4SShaohua Li blk_throtl_register_queue(q); 910d61fcfa4SShaohua Li 91180c6b157SOmar Sandoval if (q->request_fn || (q->mq_ops && q->elevator)) { 9128324aa91SJens Axboe ret = elv_register_queue(q); 9138324aa91SJens Axboe if (ret) { 9148324aa91SJens Axboe kobject_uevent(&q->kobj, KOBJ_REMOVE); 9158324aa91SJens Axboe kobject_del(&q->kobj); 91680656b67SLiu Yuan blk_trace_remove_sysfs(dev); 917c87ffbb8SXiaotian Feng kobject_put(&dev->kobj); 918b410aff2STahsin Erdogan goto unlock; 919b410aff2STahsin Erdogan } 920b410aff2STahsin Erdogan } 921b410aff2STahsin Erdogan ret = 0; 922b410aff2STahsin Erdogan unlock: 923b410aff2STahsin Erdogan mutex_unlock(&q->sysfs_lock); 9248324aa91SJens Axboe return ret; 9258324aa91SJens Axboe } 9268324aa91SJens Axboe 9278324aa91SJens Axboe void blk_unregister_queue(struct gendisk *disk) 9288324aa91SJens Axboe { 9298324aa91SJens Axboe struct request_queue *q = disk->queue; 9308324aa91SJens Axboe 931fb199746SAkinobu Mita if (WARN_ON(!q)) 932fb199746SAkinobu Mita return; 933fb199746SAkinobu Mita 934*e9a823fbSDavid Jeffery mutex_lock(&q->sysfs_lock); 935334335d2SOmar Sandoval queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q); 936*e9a823fbSDavid Jeffery mutex_unlock(&q->sysfs_lock); 937334335d2SOmar Sandoval 93802ba8893SOmar Sandoval 
wbt_exit(q); 93902ba8893SOmar Sandoval 94002ba8893SOmar Sandoval 941320ae51fSJens Axboe if (q->mq_ops) 942b21d5b30SMatias Bjørling blk_mq_unregister_dev(disk_to_dev(disk), q); 943320ae51fSJens Axboe 94480c6b157SOmar Sandoval if (q->request_fn || (q->mq_ops && q->elevator)) 9458324aa91SJens Axboe elv_unregister_queue(q); 9468324aa91SJens Axboe 9478324aa91SJens Axboe kobject_uevent(&q->kobj, KOBJ_REMOVE); 9488324aa91SJens Axboe kobject_del(&q->kobj); 94948c0d4d4SZdenek Kabelac blk_trace_remove_sysfs(disk_to_dev(disk)); 950ed9e1982STejun Heo kobject_put(&disk_to_dev(disk)->kobj); 9518324aa91SJens Axboe } 952