/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}
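
/*
 * Note on "nr_requests": writes are clamped to at least BLKDEV_MIN_RQ and
 * then applied via blk_update_nr_requests() for legacy (request_fn) queues
 * or blk_mq_update_nr_requests() for blk-mq queues.
 *
 * Illustrative usage from userspace (device name is only an example):
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 */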
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
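
/*
 * discard_max_bytes and write_same_max_bytes (below) are reported in bytes;
 * the underlying limits are kept in 512-byte sectors, hence the << 9
 * conversions.
 */
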
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
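
/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_show_<name>()/queue_store_<name>()
 * pair that reads and toggles QUEUE_FLAG_<flag>.  A non-zero "neg" inverts
 * the value seen through sysfs, e.g. QUEUE_FLAG_NONROT is exported as
 * "rotational".
 */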
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
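
/*
 * rq_affinity: reads report (set << force), i.e. 0, 1 or 2.  Writing 1 sets
 * QUEUE_FLAG_SAME_COMP, writing 2 additionally sets QUEUE_FLAG_SAME_FORCE
 * (force completion on the submitting CPU), and writing 0 clears both flags.
 */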
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};
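
/*
 * "rotational" is the inverse of QUEUE_FLAG_NONROT (neg == 1 above): it
 * reads 0 for non-rotational devices such as SSDs and 1 for rotational ones.
 */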
static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:	the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	percpu_counter_destroy(&q->mq_usage_counter);

	if (q->mq_ops)
		blk_mq_free_queue(q);

	kfree(q->flush_rq);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * Initialization must be complete by now.  Finish the initial
	 * bypass from queue allocation.
	 */
	blk_queue_bypass_end(q);
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;
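
	/*
	 * Queues without a ->request_fn (blk-mq and purely bio-based drivers)
	 * returned above; only legacy request_fn queues register an elevator.
	 */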
	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}