/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
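
/*
 * Read-ahead is reported and set in kilobytes, while backing_dev_info
 * stores it in pages; the PAGE_CACHE_SHIFT - 10 shifts below convert
 * between the two units.
 */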
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}
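
/*
 * max_sectors_kb may be lowered from user space, but only within the range
 * [one page in KB, max_hw_sectors_kb]; anything outside that is rejected
 * with -EINVAL.
 */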
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
			max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
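
/*
 * "nomerges" encodes two flags in one value: 0 allows all merging,
 * 1 sets QUEUE_FLAG_NOXMERGES (skip the more expensive merge lookups),
 * and 2 sets QUEUE_FLAG_NOMERGES (disable merging entirely).
 */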
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
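
/*
 * Each queue_sysfs_entry below binds a sysfs file name and permission mode
 * to the show/store handlers above; read-only attributes simply omit .store.
 */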
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_iostats_show,
	.store = queue_iostats_store,
};
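
/*
 * The attributes listed here become the files visible under
 * /sys/block/<disk>/queue/ once blk_register_queue() adds the "queue"
 * kobject.  For example (assuming a disk named "sda"):
 *
 *	# cat /sys/block/sda/queue/max_sectors_kb
 *	# echo 128 > /sys/block/sda/queue/nr_requests
 */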
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
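
/*
 * queue_attr_show() and queue_attr_store() dispatch sysfs accesses to the
 * per-attribute handlers while holding q->sysfs_lock, and refuse access
 * once the queue has been marked QUEUE_FLAG_DEAD.
 */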
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
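
/*
 * blk_register_queue() hangs the "queue" kobject off the disk's device
 * kobject, emits KOBJ_ADD, and registers the elevator sysfs files only for
 * queues that actually have a request_fn (i.e. not for bio-based drivers).
 */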
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}