/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
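/*
 * read_ahead_kb is exposed to user space in kilobytes, while the backing
 * device stores it as a page count in backing_dev_info.ra_pages, hence the
 * PAGE_CACHE_SHIFT - 10 conversions in the two helpers below.
 */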
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}
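/*
 * Discard limits: discard_granularity is reported in bytes straight from the
 * queue limits, while discard_max_bytes is derived from max_discard_sectors
 * by shifting left by 9 (512-byte sectors to bytes).
 */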
static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
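/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_show_<name>/queue_store_<name>
 * pair that reads and writes a single queue flag under queue_lock.  When
 * 'neg' is non-zero the exported value is the inverse of the flag, which is
 * how "rotational" is derived from QUEUE_FLAG_NONROT below.
 */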
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
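/*
 * rq_affinity maps to QUEUE_FLAG_SAME_COMP.  Stores are only honoured when
 * the kernel is built with CONFIG_USE_GENERIC_SMP_HELPERS; otherwise the
 * store helper leaves the flag alone and returns -EINVAL.
 */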
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};
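/*
 * The "scheduler" attribute is handled by the elevator core via
 * elv_iosched_show/store; the remaining entries wrap the show helpers above.
 * Note that "hw_sector_size" reports the logical block size through the same
 * show routine as "logical_block_size".
 */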
static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};
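/*
 * "rq_affinity", "iostats" and "add_random" are writable; the latter two use
 * the show/store pairs generated by QUEUE_SYSFS_BIT_FNS() above.
 */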
static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};
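/*
 * The attributes collected here hang off the "queue" kobject that
 * blk_register_queue() adds below, so they appear in sysfs under the disk's
 * queue/ directory (e.g. /sys/block/<disk>/queue/).
 */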
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the kobject release method paired with
 *     blk_init_queue() or blk_queue_make_request().  It runs once the last
 *     reference to the queue is dropped, typically when a block device is
 *     being de-registered.  Currently, its primary task is to free all the
 *     &struct request structures that were allocated to the queue and the
 *     queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	blk_throtl_exit(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}