18324aa91SJens Axboe /* 28324aa91SJens Axboe * Functions related to sysfs handling 38324aa91SJens Axboe */ 48324aa91SJens Axboe #include <linux/kernel.h> 55a0e3ad6STejun Heo #include <linux/slab.h> 68324aa91SJens Axboe #include <linux/module.h> 78324aa91SJens Axboe #include <linux/bio.h> 88324aa91SJens Axboe #include <linux/blkdev.h> 966114cadSTejun Heo #include <linux/backing-dev.h> 108324aa91SJens Axboe #include <linux/blktrace_api.h> 11320ae51fSJens Axboe #include <linux/blk-mq.h> 12eea8f41cSTejun Heo #include <linux/blk-cgroup.h> 138324aa91SJens Axboe 148324aa91SJens Axboe #include "blk.h" 153edcc0ceSMing Lei #include "blk-mq.h" 1687760e5eSJens Axboe #include "blk-wbt.h" 178324aa91SJens Axboe 188324aa91SJens Axboe struct queue_sysfs_entry { 198324aa91SJens Axboe struct attribute attr; 208324aa91SJens Axboe ssize_t (*show)(struct request_queue *, char *); 218324aa91SJens Axboe ssize_t (*store)(struct request_queue *, const char *, size_t); 228324aa91SJens Axboe }; 238324aa91SJens Axboe 248324aa91SJens Axboe static ssize_t 259cb308ceSXiaotian Feng queue_var_show(unsigned long var, char *page) 268324aa91SJens Axboe { 279cb308ceSXiaotian Feng return sprintf(page, "%lu\n", var); 288324aa91SJens Axboe } 298324aa91SJens Axboe 308324aa91SJens Axboe static ssize_t 318324aa91SJens Axboe queue_var_store(unsigned long *var, const char *page, size_t count) 328324aa91SJens Axboe { 33b1f3b64dSDave Reisner int err; 34b1f3b64dSDave Reisner unsigned long v; 358324aa91SJens Axboe 36ed751e68SJingoo Han err = kstrtoul(page, 10, &v); 37b1f3b64dSDave Reisner if (err || v > UINT_MAX) 38b1f3b64dSDave Reisner return -EINVAL; 39b1f3b64dSDave Reisner 40b1f3b64dSDave Reisner *var = v; 41b1f3b64dSDave Reisner 428324aa91SJens Axboe return count; 438324aa91SJens Axboe } 448324aa91SJens Axboe 4580e091d1SJens Axboe static ssize_t queue_var_store64(s64 *var, const char *page) 4687760e5eSJens Axboe { 4787760e5eSJens Axboe int err; 4880e091d1SJens Axboe s64 v; 4987760e5eSJens Axboe 5080e091d1SJens 
Axboe err = kstrtos64(page, 10, &v); 5187760e5eSJens Axboe if (err < 0) 5287760e5eSJens Axboe return err; 5387760e5eSJens Axboe 5487760e5eSJens Axboe *var = v; 5587760e5eSJens Axboe return 0; 5687760e5eSJens Axboe } 5787760e5eSJens Axboe 588324aa91SJens Axboe static ssize_t queue_requests_show(struct request_queue *q, char *page) 598324aa91SJens Axboe { 608324aa91SJens Axboe return queue_var_show(q->nr_requests, (page)); 618324aa91SJens Axboe } 628324aa91SJens Axboe 638324aa91SJens Axboe static ssize_t 648324aa91SJens Axboe queue_requests_store(struct request_queue *q, const char *page, size_t count) 658324aa91SJens Axboe { 668324aa91SJens Axboe unsigned long nr; 67e3a2b3f9SJens Axboe int ret, err; 68b8a9ae77SJens Axboe 69e3a2b3f9SJens Axboe if (!q->request_fn && !q->mq_ops) 70b8a9ae77SJens Axboe return -EINVAL; 71b8a9ae77SJens Axboe 72b8a9ae77SJens Axboe ret = queue_var_store(&nr, page, count); 73b1f3b64dSDave Reisner if (ret < 0) 74b1f3b64dSDave Reisner return ret; 75b1f3b64dSDave Reisner 768324aa91SJens Axboe if (nr < BLKDEV_MIN_RQ) 778324aa91SJens Axboe nr = BLKDEV_MIN_RQ; 788324aa91SJens Axboe 79e3a2b3f9SJens Axboe if (q->request_fn) 80e3a2b3f9SJens Axboe err = blk_update_nr_requests(q, nr); 81e3a2b3f9SJens Axboe else 82e3a2b3f9SJens Axboe err = blk_mq_update_nr_requests(q, nr); 838324aa91SJens Axboe 84e3a2b3f9SJens Axboe if (err) 85e3a2b3f9SJens Axboe return err; 86a051661cSTejun Heo 878324aa91SJens Axboe return ret; 888324aa91SJens Axboe } 898324aa91SJens Axboe 908324aa91SJens Axboe static ssize_t queue_ra_show(struct request_queue *q, char *page) 918324aa91SJens Axboe { 92dc3b17ccSJan Kara unsigned long ra_kb = q->backing_dev_info->ra_pages << 9309cbfeafSKirill A. 
Shutemov (PAGE_SHIFT - 10); 948324aa91SJens Axboe 958324aa91SJens Axboe return queue_var_show(ra_kb, (page)); 968324aa91SJens Axboe } 978324aa91SJens Axboe 988324aa91SJens Axboe static ssize_t 998324aa91SJens Axboe queue_ra_store(struct request_queue *q, const char *page, size_t count) 1008324aa91SJens Axboe { 1018324aa91SJens Axboe unsigned long ra_kb; 1028324aa91SJens Axboe ssize_t ret = queue_var_store(&ra_kb, page, count); 1038324aa91SJens Axboe 104b1f3b64dSDave Reisner if (ret < 0) 105b1f3b64dSDave Reisner return ret; 106b1f3b64dSDave Reisner 107dc3b17ccSJan Kara q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10); 1088324aa91SJens Axboe 1098324aa91SJens Axboe return ret; 1108324aa91SJens Axboe } 1118324aa91SJens Axboe 1128324aa91SJens Axboe static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) 1138324aa91SJens Axboe { 114ae03bf63SMartin K. Petersen int max_sectors_kb = queue_max_sectors(q) >> 1; 1158324aa91SJens Axboe 1168324aa91SJens Axboe return queue_var_show(max_sectors_kb, (page)); 1178324aa91SJens Axboe } 1188324aa91SJens Axboe 119c77a5710SMartin K. Petersen static ssize_t queue_max_segments_show(struct request_queue *q, char *page) 120c77a5710SMartin K. Petersen { 121c77a5710SMartin K. Petersen return queue_var_show(queue_max_segments(q), (page)); 122c77a5710SMartin K. Petersen } 123c77a5710SMartin K. Petersen 1241e739730SChristoph Hellwig static ssize_t queue_max_discard_segments_show(struct request_queue *q, 1251e739730SChristoph Hellwig char *page) 1261e739730SChristoph Hellwig { 1271e739730SChristoph Hellwig return queue_var_show(queue_max_discard_segments(q), (page)); 1281e739730SChristoph Hellwig } 1291e739730SChristoph Hellwig 13013f05c8dSMartin K. Petersen static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page) 13113f05c8dSMartin K. Petersen { 13213f05c8dSMartin K. Petersen return queue_var_show(q->limits.max_integrity_segments, (page)); 13313f05c8dSMartin K. 
Petersen } 13413f05c8dSMartin K. Petersen 135c77a5710SMartin K. Petersen static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) 136c77a5710SMartin K. Petersen { 137e692cb66SMartin K. Petersen if (blk_queue_cluster(q)) 138c77a5710SMartin K. Petersen return queue_var_show(queue_max_segment_size(q), (page)); 139c77a5710SMartin K. Petersen 14009cbfeafSKirill A. Shutemov return queue_var_show(PAGE_SIZE, (page)); 141c77a5710SMartin K. Petersen } 142c77a5710SMartin K. Petersen 143e1defc4fSMartin K. Petersen static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 144e68b903cSMartin K. Petersen { 145e1defc4fSMartin K. Petersen return queue_var_show(queue_logical_block_size(q), page); 146e68b903cSMartin K. Petersen } 147e68b903cSMartin K. Petersen 148c72758f3SMartin K. Petersen static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) 149c72758f3SMartin K. Petersen { 150c72758f3SMartin K. Petersen return queue_var_show(queue_physical_block_size(q), page); 151c72758f3SMartin K. Petersen } 152c72758f3SMartin K. Petersen 15387caf97cSHannes Reinecke static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page) 15487caf97cSHannes Reinecke { 15587caf97cSHannes Reinecke return queue_var_show(q->limits.chunk_sectors, page); 15687caf97cSHannes Reinecke } 15787caf97cSHannes Reinecke 158c72758f3SMartin K. Petersen static ssize_t queue_io_min_show(struct request_queue *q, char *page) 159c72758f3SMartin K. Petersen { 160c72758f3SMartin K. Petersen return queue_var_show(queue_io_min(q), page); 161c72758f3SMartin K. Petersen } 162c72758f3SMartin K. Petersen 163c72758f3SMartin K. Petersen static ssize_t queue_io_opt_show(struct request_queue *q, char *page) 164c72758f3SMartin K. Petersen { 165c72758f3SMartin K. Petersen return queue_var_show(queue_io_opt(q), page); 1668324aa91SJens Axboe } 1678324aa91SJens Axboe 16886b37281SMartin K. 
Petersen static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) 16986b37281SMartin K. Petersen { 17086b37281SMartin K. Petersen return queue_var_show(q->limits.discard_granularity, page); 17186b37281SMartin K. Petersen } 17286b37281SMartin K. Petersen 1730034af03SJens Axboe static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) 1740034af03SJens Axboe { 1750034af03SJens Axboe 17618f922d0SAlan return sprintf(page, "%llu\n", 17718f922d0SAlan (unsigned long long)q->limits.max_hw_discard_sectors << 9); 1780034af03SJens Axboe } 1790034af03SJens Axboe 18086b37281SMartin K. Petersen static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 18186b37281SMartin K. Petersen { 182a934a00aSMartin K. Petersen return sprintf(page, "%llu\n", 183a934a00aSMartin K. Petersen (unsigned long long)q->limits.max_discard_sectors << 9); 18486b37281SMartin K. Petersen } 18586b37281SMartin K. Petersen 1860034af03SJens Axboe static ssize_t queue_discard_max_store(struct request_queue *q, 1870034af03SJens Axboe const char *page, size_t count) 1880034af03SJens Axboe { 1890034af03SJens Axboe unsigned long max_discard; 1900034af03SJens Axboe ssize_t ret = queue_var_store(&max_discard, page, count); 1910034af03SJens Axboe 1920034af03SJens Axboe if (ret < 0) 1930034af03SJens Axboe return ret; 1940034af03SJens Axboe 1950034af03SJens Axboe if (max_discard & (q->limits.discard_granularity - 1)) 1960034af03SJens Axboe return -EINVAL; 1970034af03SJens Axboe 1980034af03SJens Axboe max_discard >>= 9; 1990034af03SJens Axboe if (max_discard > UINT_MAX) 2000034af03SJens Axboe return -EINVAL; 2010034af03SJens Axboe 2020034af03SJens Axboe if (max_discard > q->limits.max_hw_discard_sectors) 2030034af03SJens Axboe max_discard = q->limits.max_hw_discard_sectors; 2040034af03SJens Axboe 2050034af03SJens Axboe q->limits.max_discard_sectors = max_discard; 2060034af03SJens Axboe return ret; 2070034af03SJens Axboe } 2080034af03SJens Axboe 
20998262f27SMartin K. Petersen static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) 21098262f27SMartin K. Petersen { 21198262f27SMartin K. Petersen return queue_var_show(queue_discard_zeroes_data(q), page); 21298262f27SMartin K. Petersen } 21398262f27SMartin K. Petersen 2144363ac7cSMartin K. Petersen static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) 2154363ac7cSMartin K. Petersen { 2164363ac7cSMartin K. Petersen return sprintf(page, "%llu\n", 2174363ac7cSMartin K. Petersen (unsigned long long)q->limits.max_write_same_sectors << 9); 2184363ac7cSMartin K. Petersen } 2194363ac7cSMartin K. Petersen 220a6f0788eSChaitanya Kulkarni static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page) 221a6f0788eSChaitanya Kulkarni { 222a6f0788eSChaitanya Kulkarni return sprintf(page, "%llu\n", 223a6f0788eSChaitanya Kulkarni (unsigned long long)q->limits.max_write_zeroes_sectors << 9); 224a6f0788eSChaitanya Kulkarni } 2254363ac7cSMartin K. Petersen 2268324aa91SJens Axboe static ssize_t 2278324aa91SJens Axboe queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) 2288324aa91SJens Axboe { 2298324aa91SJens Axboe unsigned long max_sectors_kb, 230ae03bf63SMartin K. Petersen max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, 23109cbfeafSKirill A. Shutemov page_kb = 1 << (PAGE_SHIFT - 10); 2328324aa91SJens Axboe ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 2338324aa91SJens Axboe 234b1f3b64dSDave Reisner if (ret < 0) 235b1f3b64dSDave Reisner return ret; 236b1f3b64dSDave Reisner 237ca369d51SMartin K. Petersen max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long) 238ca369d51SMartin K. Petersen q->limits.max_dev_sectors >> 1); 239ca369d51SMartin K. 
Petersen 2408324aa91SJens Axboe if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) 2418324aa91SJens Axboe return -EINVAL; 2427c239517SWu Fengguang 2438324aa91SJens Axboe spin_lock_irq(q->queue_lock); 244c295fc05SNikanth Karthikesan q->limits.max_sectors = max_sectors_kb << 1; 245dc3b17ccSJan Kara q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10); 2468324aa91SJens Axboe spin_unlock_irq(q->queue_lock); 2478324aa91SJens Axboe 2488324aa91SJens Axboe return ret; 2498324aa91SJens Axboe } 2508324aa91SJens Axboe 2518324aa91SJens Axboe static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) 2528324aa91SJens Axboe { 253ae03bf63SMartin K. Petersen int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; 2548324aa91SJens Axboe 2558324aa91SJens Axboe return queue_var_show(max_hw_sectors_kb, (page)); 2568324aa91SJens Axboe } 2578324aa91SJens Axboe 258956bcb7cSJens Axboe #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ 259956bcb7cSJens Axboe static ssize_t \ 260956bcb7cSJens Axboe queue_show_##name(struct request_queue *q, char *page) \ 261956bcb7cSJens Axboe { \ 262956bcb7cSJens Axboe int bit; \ 263956bcb7cSJens Axboe bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ 264956bcb7cSJens Axboe return queue_var_show(neg ? 
!bit : bit, page); \ 265956bcb7cSJens Axboe } \ 266956bcb7cSJens Axboe static ssize_t \ 267956bcb7cSJens Axboe queue_store_##name(struct request_queue *q, const char *page, size_t count) \ 268956bcb7cSJens Axboe { \ 269956bcb7cSJens Axboe unsigned long val; \ 270956bcb7cSJens Axboe ssize_t ret; \ 271956bcb7cSJens Axboe ret = queue_var_store(&val, page, count); \ 272c678ef52SArnd Bergmann if (ret < 0) \ 273c678ef52SArnd Bergmann return ret; \ 274956bcb7cSJens Axboe if (neg) \ 275956bcb7cSJens Axboe val = !val; \ 276956bcb7cSJens Axboe \ 277956bcb7cSJens Axboe spin_lock_irq(q->queue_lock); \ 278956bcb7cSJens Axboe if (val) \ 279956bcb7cSJens Axboe queue_flag_set(QUEUE_FLAG_##flag, q); \ 280956bcb7cSJens Axboe else \ 281956bcb7cSJens Axboe queue_flag_clear(QUEUE_FLAG_##flag, q); \ 282956bcb7cSJens Axboe spin_unlock_irq(q->queue_lock); \ 283956bcb7cSJens Axboe return ret; \ 2841308835fSBartlomiej Zolnierkiewicz } 2851308835fSBartlomiej Zolnierkiewicz 286956bcb7cSJens Axboe QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); 287956bcb7cSJens Axboe QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); 288956bcb7cSJens Axboe QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); 289956bcb7cSJens Axboe #undef QUEUE_SYSFS_BIT_FNS 2901308835fSBartlomiej Zolnierkiewicz 291797476b8SDamien Le Moal static ssize_t queue_zoned_show(struct request_queue *q, char *page) 292797476b8SDamien Le Moal { 293797476b8SDamien Le Moal switch (blk_queue_zoned_model(q)) { 294797476b8SDamien Le Moal case BLK_ZONED_HA: 295797476b8SDamien Le Moal return sprintf(page, "host-aware\n"); 296797476b8SDamien Le Moal case BLK_ZONED_HM: 297797476b8SDamien Le Moal return sprintf(page, "host-managed\n"); 298797476b8SDamien Le Moal default: 299797476b8SDamien Le Moal return sprintf(page, "none\n"); 300797476b8SDamien Le Moal } 301797476b8SDamien Le Moal } 302797476b8SDamien Le Moal 303ac9fafa1SAlan D. Brunelle static ssize_t queue_nomerges_show(struct request_queue *q, char *page) 304ac9fafa1SAlan D. Brunelle { 305488991e2SAlan D. 
Brunelle return queue_var_show((blk_queue_nomerges(q) << 1) | 306488991e2SAlan D. Brunelle blk_queue_noxmerges(q), page); 307ac9fafa1SAlan D. Brunelle } 308ac9fafa1SAlan D. Brunelle 309ac9fafa1SAlan D. Brunelle static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, 310ac9fafa1SAlan D. Brunelle size_t count) 311ac9fafa1SAlan D. Brunelle { 312ac9fafa1SAlan D. Brunelle unsigned long nm; 313ac9fafa1SAlan D. Brunelle ssize_t ret = queue_var_store(&nm, page, count); 314ac9fafa1SAlan D. Brunelle 315b1f3b64dSDave Reisner if (ret < 0) 316b1f3b64dSDave Reisner return ret; 317b1f3b64dSDave Reisner 318bf0f9702SJens Axboe spin_lock_irq(q->queue_lock); 319bf0f9702SJens Axboe queue_flag_clear(QUEUE_FLAG_NOMERGES, q); 320488991e2SAlan D. Brunelle queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); 321488991e2SAlan D. Brunelle if (nm == 2) 322488991e2SAlan D. Brunelle queue_flag_set(QUEUE_FLAG_NOMERGES, q); 323488991e2SAlan D. Brunelle else if (nm) 324488991e2SAlan D. Brunelle queue_flag_set(QUEUE_FLAG_NOXMERGES, q); 325bf0f9702SJens Axboe spin_unlock_irq(q->queue_lock); 3261308835fSBartlomiej Zolnierkiewicz 327ac9fafa1SAlan D. Brunelle return ret; 328ac9fafa1SAlan D. Brunelle } 329ac9fafa1SAlan D. 
Brunelle 330c7c22e4dSJens Axboe static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) 331c7c22e4dSJens Axboe { 3329cb308ceSXiaotian Feng bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); 3335757a6d7SDan Williams bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags); 334c7c22e4dSJens Axboe 3355757a6d7SDan Williams return queue_var_show(set << force, page); 336c7c22e4dSJens Axboe } 337c7c22e4dSJens Axboe 338c7c22e4dSJens Axboe static ssize_t 339c7c22e4dSJens Axboe queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) 340c7c22e4dSJens Axboe { 341c7c22e4dSJens Axboe ssize_t ret = -EINVAL; 3420a06ff06SChristoph Hellwig #ifdef CONFIG_SMP 343c7c22e4dSJens Axboe unsigned long val; 344c7c22e4dSJens Axboe 345c7c22e4dSJens Axboe ret = queue_var_store(&val, page, count); 346b1f3b64dSDave Reisner if (ret < 0) 347b1f3b64dSDave Reisner return ret; 348b1f3b64dSDave Reisner 349c7c22e4dSJens Axboe spin_lock_irq(q->queue_lock); 350e8037d49SEric Seppanen if (val == 2) { 351c7c22e4dSJens Axboe queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 3525757a6d7SDan Williams queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); 353e8037d49SEric Seppanen } else if (val == 1) { 354e8037d49SEric Seppanen queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 355e8037d49SEric Seppanen queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); 356e8037d49SEric Seppanen } else if (val == 0) { 357c7c22e4dSJens Axboe queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); 3585757a6d7SDan Williams queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); 3595757a6d7SDan Williams } 360c7c22e4dSJens Axboe spin_unlock_irq(q->queue_lock); 361c7c22e4dSJens Axboe #endif 362c7c22e4dSJens Axboe return ret; 363c7c22e4dSJens Axboe } 3648324aa91SJens Axboe 36506426adfSJens Axboe static ssize_t queue_poll_delay_show(struct request_queue *q, char *page) 36606426adfSJens Axboe { 36764f1c21eSJens Axboe int val; 36864f1c21eSJens Axboe 36964f1c21eSJens Axboe if (q->poll_nsec == -1) 37064f1c21eSJens Axboe val = -1; 
37164f1c21eSJens Axboe else 37264f1c21eSJens Axboe val = q->poll_nsec / 1000; 37364f1c21eSJens Axboe 37464f1c21eSJens Axboe return sprintf(page, "%d\n", val); 37506426adfSJens Axboe } 37606426adfSJens Axboe 37706426adfSJens Axboe static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page, 37806426adfSJens Axboe size_t count) 37906426adfSJens Axboe { 38064f1c21eSJens Axboe int err, val; 38106426adfSJens Axboe 38206426adfSJens Axboe if (!q->mq_ops || !q->mq_ops->poll) 38306426adfSJens Axboe return -EINVAL; 38406426adfSJens Axboe 38564f1c21eSJens Axboe err = kstrtoint(page, 10, &val); 38664f1c21eSJens Axboe if (err < 0) 38764f1c21eSJens Axboe return err; 38806426adfSJens Axboe 38964f1c21eSJens Axboe if (val == -1) 39064f1c21eSJens Axboe q->poll_nsec = -1; 39164f1c21eSJens Axboe else 39264f1c21eSJens Axboe q->poll_nsec = val * 1000; 39364f1c21eSJens Axboe 39464f1c21eSJens Axboe return count; 39506426adfSJens Axboe } 39606426adfSJens Axboe 39705229beeSJens Axboe static ssize_t queue_poll_show(struct request_queue *q, char *page) 39805229beeSJens Axboe { 39905229beeSJens Axboe return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page); 40005229beeSJens Axboe } 40105229beeSJens Axboe 40205229beeSJens Axboe static ssize_t queue_poll_store(struct request_queue *q, const char *page, 40305229beeSJens Axboe size_t count) 40405229beeSJens Axboe { 40505229beeSJens Axboe unsigned long poll_on; 40605229beeSJens Axboe ssize_t ret; 40705229beeSJens Axboe 40805229beeSJens Axboe if (!q->mq_ops || !q->mq_ops->poll) 40905229beeSJens Axboe return -EINVAL; 41005229beeSJens Axboe 41105229beeSJens Axboe ret = queue_var_store(&poll_on, page, count); 41205229beeSJens Axboe if (ret < 0) 41305229beeSJens Axboe return ret; 41405229beeSJens Axboe 41505229beeSJens Axboe spin_lock_irq(q->queue_lock); 41605229beeSJens Axboe if (poll_on) 41705229beeSJens Axboe queue_flag_set(QUEUE_FLAG_POLL, q); 41805229beeSJens Axboe else 41905229beeSJens Axboe 
queue_flag_clear(QUEUE_FLAG_POLL, q); 42005229beeSJens Axboe spin_unlock_irq(q->queue_lock); 42105229beeSJens Axboe 42205229beeSJens Axboe return ret; 42305229beeSJens Axboe } 42405229beeSJens Axboe 42587760e5eSJens Axboe static ssize_t queue_wb_lat_show(struct request_queue *q, char *page) 42687760e5eSJens Axboe { 42787760e5eSJens Axboe if (!q->rq_wb) 42887760e5eSJens Axboe return -EINVAL; 42987760e5eSJens Axboe 43087760e5eSJens Axboe return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000)); 43187760e5eSJens Axboe } 43287760e5eSJens Axboe 43387760e5eSJens Axboe static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, 43487760e5eSJens Axboe size_t count) 43587760e5eSJens Axboe { 43680e091d1SJens Axboe struct rq_wb *rwb; 43787760e5eSJens Axboe ssize_t ret; 43880e091d1SJens Axboe s64 val; 43987760e5eSJens Axboe 44087760e5eSJens Axboe ret = queue_var_store64(&val, page); 44187760e5eSJens Axboe if (ret < 0) 44287760e5eSJens Axboe return ret; 443d62118b6SJens Axboe if (val < -1) 444d62118b6SJens Axboe return -EINVAL; 445d62118b6SJens Axboe 446d62118b6SJens Axboe rwb = q->rq_wb; 447d62118b6SJens Axboe if (!rwb) { 448d62118b6SJens Axboe ret = wbt_init(q); 449d62118b6SJens Axboe if (ret) 450d62118b6SJens Axboe return ret; 451d62118b6SJens Axboe 452d62118b6SJens Axboe rwb = q->rq_wb; 453d62118b6SJens Axboe if (!rwb) 454d62118b6SJens Axboe return -EINVAL; 455d62118b6SJens Axboe } 45687760e5eSJens Axboe 45780e091d1SJens Axboe if (val == -1) 45880e091d1SJens Axboe rwb->min_lat_nsec = wbt_default_latency_nsec(q); 45980e091d1SJens Axboe else if (val >= 0) 46080e091d1SJens Axboe rwb->min_lat_nsec = val * 1000ULL; 461d62118b6SJens Axboe 462d62118b6SJens Axboe if (rwb->enable_state == WBT_STATE_ON_DEFAULT) 463d62118b6SJens Axboe rwb->enable_state = WBT_STATE_ON_MANUAL; 46480e091d1SJens Axboe 46580e091d1SJens Axboe wbt_update_limits(rwb); 46687760e5eSJens Axboe return count; 46787760e5eSJens Axboe } 46887760e5eSJens Axboe 46993e9d8e8SJens Axboe 
static ssize_t queue_wc_show(struct request_queue *q, char *page) 47093e9d8e8SJens Axboe { 47193e9d8e8SJens Axboe if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 47293e9d8e8SJens Axboe return sprintf(page, "write back\n"); 47393e9d8e8SJens Axboe 47493e9d8e8SJens Axboe return sprintf(page, "write through\n"); 47593e9d8e8SJens Axboe } 47693e9d8e8SJens Axboe 47793e9d8e8SJens Axboe static ssize_t queue_wc_store(struct request_queue *q, const char *page, 47893e9d8e8SJens Axboe size_t count) 47993e9d8e8SJens Axboe { 48093e9d8e8SJens Axboe int set = -1; 48193e9d8e8SJens Axboe 48293e9d8e8SJens Axboe if (!strncmp(page, "write back", 10)) 48393e9d8e8SJens Axboe set = 1; 48493e9d8e8SJens Axboe else if (!strncmp(page, "write through", 13) || 48593e9d8e8SJens Axboe !strncmp(page, "none", 4)) 48693e9d8e8SJens Axboe set = 0; 48793e9d8e8SJens Axboe 48893e9d8e8SJens Axboe if (set == -1) 48993e9d8e8SJens Axboe return -EINVAL; 49093e9d8e8SJens Axboe 49193e9d8e8SJens Axboe spin_lock_irq(q->queue_lock); 49293e9d8e8SJens Axboe if (set) 49393e9d8e8SJens Axboe queue_flag_set(QUEUE_FLAG_WC, q); 49493e9d8e8SJens Axboe else 49593e9d8e8SJens Axboe queue_flag_clear(QUEUE_FLAG_WC, q); 49693e9d8e8SJens Axboe spin_unlock_irq(q->queue_lock); 49793e9d8e8SJens Axboe 49893e9d8e8SJens Axboe return count; 49993e9d8e8SJens Axboe } 50093e9d8e8SJens Axboe 501ea6ca600SYigal Korman static ssize_t queue_dax_show(struct request_queue *q, char *page) 502ea6ca600SYigal Korman { 503ea6ca600SYigal Korman return queue_var_show(blk_queue_dax(q), page); 504ea6ca600SYigal Korman } 505ea6ca600SYigal Korman 5068324aa91SJens Axboe static struct queue_sysfs_entry queue_requests_entry = { 5078324aa91SJens Axboe .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, 5088324aa91SJens Axboe .show = queue_requests_show, 5098324aa91SJens Axboe .store = queue_requests_store, 5108324aa91SJens Axboe }; 5118324aa91SJens Axboe 5128324aa91SJens Axboe static struct queue_sysfs_entry queue_ra_entry = { 5138324aa91SJens Axboe .attr 
= {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, 5148324aa91SJens Axboe .show = queue_ra_show, 5158324aa91SJens Axboe .store = queue_ra_store, 5168324aa91SJens Axboe }; 5178324aa91SJens Axboe 5188324aa91SJens Axboe static struct queue_sysfs_entry queue_max_sectors_entry = { 5198324aa91SJens Axboe .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, 5208324aa91SJens Axboe .show = queue_max_sectors_show, 5218324aa91SJens Axboe .store = queue_max_sectors_store, 5228324aa91SJens Axboe }; 5238324aa91SJens Axboe 5248324aa91SJens Axboe static struct queue_sysfs_entry queue_max_hw_sectors_entry = { 5258324aa91SJens Axboe .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, 5268324aa91SJens Axboe .show = queue_max_hw_sectors_show, 5278324aa91SJens Axboe }; 5288324aa91SJens Axboe 529c77a5710SMartin K. Petersen static struct queue_sysfs_entry queue_max_segments_entry = { 530c77a5710SMartin K. Petersen .attr = {.name = "max_segments", .mode = S_IRUGO }, 531c77a5710SMartin K. Petersen .show = queue_max_segments_show, 532c77a5710SMartin K. Petersen }; 533c77a5710SMartin K. Petersen 5341e739730SChristoph Hellwig static struct queue_sysfs_entry queue_max_discard_segments_entry = { 5351e739730SChristoph Hellwig .attr = {.name = "max_discard_segments", .mode = S_IRUGO }, 5361e739730SChristoph Hellwig .show = queue_max_discard_segments_show, 5371e739730SChristoph Hellwig }; 5381e739730SChristoph Hellwig 53913f05c8dSMartin K. Petersen static struct queue_sysfs_entry queue_max_integrity_segments_entry = { 54013f05c8dSMartin K. Petersen .attr = {.name = "max_integrity_segments", .mode = S_IRUGO }, 54113f05c8dSMartin K. Petersen .show = queue_max_integrity_segments_show, 54213f05c8dSMartin K. Petersen }; 54313f05c8dSMartin K. Petersen 544c77a5710SMartin K. Petersen static struct queue_sysfs_entry queue_max_segment_size_entry = { 545c77a5710SMartin K. Petersen .attr = {.name = "max_segment_size", .mode = S_IRUGO }, 546c77a5710SMartin K. 
Petersen .show = queue_max_segment_size_show, 547c77a5710SMartin K. Petersen }; 548c77a5710SMartin K. Petersen 5498324aa91SJens Axboe static struct queue_sysfs_entry queue_iosched_entry = { 5508324aa91SJens Axboe .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, 5518324aa91SJens Axboe .show = elv_iosched_show, 5528324aa91SJens Axboe .store = elv_iosched_store, 5538324aa91SJens Axboe }; 5548324aa91SJens Axboe 555e68b903cSMartin K. Petersen static struct queue_sysfs_entry queue_hw_sector_size_entry = { 556e68b903cSMartin K. Petersen .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, 557e1defc4fSMartin K. Petersen .show = queue_logical_block_size_show, 558e1defc4fSMartin K. Petersen }; 559e1defc4fSMartin K. Petersen 560e1defc4fSMartin K. Petersen static struct queue_sysfs_entry queue_logical_block_size_entry = { 561e1defc4fSMartin K. Petersen .attr = {.name = "logical_block_size", .mode = S_IRUGO }, 562e1defc4fSMartin K. Petersen .show = queue_logical_block_size_show, 563e68b903cSMartin K. Petersen }; 564e68b903cSMartin K. Petersen 565c72758f3SMartin K. Petersen static struct queue_sysfs_entry queue_physical_block_size_entry = { 566c72758f3SMartin K. Petersen .attr = {.name = "physical_block_size", .mode = S_IRUGO }, 567c72758f3SMartin K. Petersen .show = queue_physical_block_size_show, 568c72758f3SMartin K. Petersen }; 569c72758f3SMartin K. Petersen 57087caf97cSHannes Reinecke static struct queue_sysfs_entry queue_chunk_sectors_entry = { 57187caf97cSHannes Reinecke .attr = {.name = "chunk_sectors", .mode = S_IRUGO }, 57287caf97cSHannes Reinecke .show = queue_chunk_sectors_show, 57387caf97cSHannes Reinecke }; 57487caf97cSHannes Reinecke 575c72758f3SMartin K. Petersen static struct queue_sysfs_entry queue_io_min_entry = { 576c72758f3SMartin K. Petersen .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, 577c72758f3SMartin K. Petersen .show = queue_io_min_show, 578c72758f3SMartin K. Petersen }; 579c72758f3SMartin K. Petersen 580c72758f3SMartin K. 
Petersen static struct queue_sysfs_entry queue_io_opt_entry = { 581c72758f3SMartin K. Petersen .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, 582c72758f3SMartin K. Petersen .show = queue_io_opt_show, 5838324aa91SJens Axboe }; 5848324aa91SJens Axboe 58586b37281SMartin K. Petersen static struct queue_sysfs_entry queue_discard_granularity_entry = { 58686b37281SMartin K. Petersen .attr = {.name = "discard_granularity", .mode = S_IRUGO }, 58786b37281SMartin K. Petersen .show = queue_discard_granularity_show, 58886b37281SMartin K. Petersen }; 58986b37281SMartin K. Petersen 5900034af03SJens Axboe static struct queue_sysfs_entry queue_discard_max_hw_entry = { 5910034af03SJens Axboe .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO }, 5920034af03SJens Axboe .show = queue_discard_max_hw_show, 5930034af03SJens Axboe }; 5940034af03SJens Axboe 59586b37281SMartin K. Petersen static struct queue_sysfs_entry queue_discard_max_entry = { 5960034af03SJens Axboe .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR }, 59786b37281SMartin K. Petersen .show = queue_discard_max_show, 5980034af03SJens Axboe .store = queue_discard_max_store, 59986b37281SMartin K. Petersen }; 60086b37281SMartin K. Petersen 60198262f27SMartin K. Petersen static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { 60298262f27SMartin K. Petersen .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO }, 60398262f27SMartin K. Petersen .show = queue_discard_zeroes_data_show, 60498262f27SMartin K. Petersen }; 60598262f27SMartin K. Petersen 6064363ac7cSMartin K. Petersen static struct queue_sysfs_entry queue_write_same_max_entry = { 6074363ac7cSMartin K. Petersen .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO }, 6084363ac7cSMartin K. Petersen .show = queue_write_same_max_show, 6094363ac7cSMartin K. Petersen }; 6104363ac7cSMartin K. 
Petersen 611a6f0788eSChaitanya Kulkarni static struct queue_sysfs_entry queue_write_zeroes_max_entry = { 612a6f0788eSChaitanya Kulkarni .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO }, 613a6f0788eSChaitanya Kulkarni .show = queue_write_zeroes_max_show, 614a6f0788eSChaitanya Kulkarni }; 615a6f0788eSChaitanya Kulkarni 6161308835fSBartlomiej Zolnierkiewicz static struct queue_sysfs_entry queue_nonrot_entry = { 6171308835fSBartlomiej Zolnierkiewicz .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, 618956bcb7cSJens Axboe .show = queue_show_nonrot, 619956bcb7cSJens Axboe .store = queue_store_nonrot, 6201308835fSBartlomiej Zolnierkiewicz }; 6211308835fSBartlomiej Zolnierkiewicz 622797476b8SDamien Le Moal static struct queue_sysfs_entry queue_zoned_entry = { 623797476b8SDamien Le Moal .attr = {.name = "zoned", .mode = S_IRUGO }, 624797476b8SDamien Le Moal .show = queue_zoned_show, 625797476b8SDamien Le Moal }; 626797476b8SDamien Le Moal 627ac9fafa1SAlan D. Brunelle static struct queue_sysfs_entry queue_nomerges_entry = { 628ac9fafa1SAlan D. Brunelle .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, 629ac9fafa1SAlan D. Brunelle .show = queue_nomerges_show, 630ac9fafa1SAlan D. Brunelle .store = queue_nomerges_store, 631ac9fafa1SAlan D. Brunelle }; 632ac9fafa1SAlan D. 
/*
 * Sysfs attribute descriptors: each entry binds a file name under
 * /sys/block/<disk>/queue/ to its show (read) and optional store (write)
 * handler.  Entries without a .store handler are read-only (mode S_IRUGO).
 */
static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

/* "dax" is read-only: no store handler, mode has no write bits. */
static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

/*
 * The full attribute set published for every request queue via
 * blk_queue_ktype below.  Must be NULL-terminated.
 */
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

/* Recover the queue_sysfs_entry wrapping a generic struct attribute. */
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

/*
 * Common sysfs read path for all queue attributes: dispatch to the
 * entry's ->show handler under q->sysfs_lock, refusing access once the
 * queue has been marked dying.
 */
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;	/* attribute is write-only */
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/*
 * Common sysfs write path: same locking and dying-queue check as
 * queue_attr_show(), dispatching to the entry's ->store handler.
 */
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;	/* attribute is read-only */

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/* RCU callback: final free of the request_queue memory itself. */
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	/* Tear down writeback throttling and poll statistics callbacks. */
	wbt_exit(q);
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	/* Legacy vs blk-mq paths free different per-queue resources. */
	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	/* Defer the actual kmem_cache_free() past any RCU readers. */
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

/* kobj_type for request queues: attributes above, release path above. */
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

/*
 * Enable writeback throttling for this queue, but only when the matching
 * config option (CONFIG_BLK_WBT_MQ for blk-mq queues, CONFIG_BLK_WBT_SQ
 * for legacy request_fn queues) is enabled.
 */
static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
	if (q->mq_ops)
		return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
	if (q->request_fn)
		return;
#endif

	/*
	 * If this fails, we don't get throttling
	 */
	wbt_init(q);
}

/*
 * Register @disk's request queue with sysfs ("queue" directory under the
 * disk device), completing queue initialization if it was deferred.
 * Returns 0 on success or a negative errno.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	/* kobject_get(dev) pins the parent; dropped in blk_unregister_queue(). */
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	blk_wb_init(q);

	blk_throtl_register_queue(q);

	/* Register the elevator only when the queue actually uses one. */
	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			/* Unwind everything done above on elevator failure. */
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}

/*
 * Undo blk_register_queue(): remove the queue's sysfs directory and
 * drop the parent device reference taken at registration time.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	/* Mirrors the elevator-registration condition in blk_register_queue(). */
	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}