xref: /linux/block/blk-sysfs.c (revision b7143fe67bfc3b83a9e11371da659e1e70a1bbf3)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
28324aa91SJens Axboe /*
38324aa91SJens Axboe  * Functions related to sysfs handling
48324aa91SJens Axboe  */
58324aa91SJens Axboe #include <linux/kernel.h>
65a0e3ad6STejun Heo #include <linux/slab.h>
78324aa91SJens Axboe #include <linux/module.h>
88324aa91SJens Axboe #include <linux/bio.h>
98324aa91SJens Axboe #include <linux/blkdev.h>
1066114cadSTejun Heo #include <linux/backing-dev.h>
118324aa91SJens Axboe #include <linux/blktrace_api.h>
12320ae51fSJens Axboe #include <linux/blk-mq.h>
13eea8f41cSTejun Heo #include <linux/blk-cgroup.h>
148324aa91SJens Axboe 
158324aa91SJens Axboe #include "blk.h"
163edcc0ceSMing Lei #include "blk-mq.h"
17d173a251SOmar Sandoval #include "blk-mq-debugfs.h"
1887760e5eSJens Axboe #include "blk-wbt.h"
198324aa91SJens Axboe 
/*
 * One attribute under /sys/block/<dev>/queue: the sysfs attribute
 * (name + mode) plus optional show/store handlers that are handed the
 * request_queue directly.
 */
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
258324aa91SJens Axboe 
/*
 * Format a single unsigned long as decimal followed by a newline --
 * the canonical layout for one-value sysfs attributes.  Returns the
 * number of bytes written to @page.
 */
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	ssize_t len = sprintf(page, "%lu\n", var);

	return len;
}
318324aa91SJens Axboe 
328324aa91SJens Axboe static ssize_t
338324aa91SJens Axboe queue_var_store(unsigned long *var, const char *page, size_t count)
348324aa91SJens Axboe {
35b1f3b64dSDave Reisner 	int err;
36b1f3b64dSDave Reisner 	unsigned long v;
378324aa91SJens Axboe 
38ed751e68SJingoo Han 	err = kstrtoul(page, 10, &v);
39b1f3b64dSDave Reisner 	if (err || v > UINT_MAX)
40b1f3b64dSDave Reisner 		return -EINVAL;
41b1f3b64dSDave Reisner 
42b1f3b64dSDave Reisner 	*var = v;
43b1f3b64dSDave Reisner 
448324aa91SJens Axboe 	return count;
458324aa91SJens Axboe }
468324aa91SJens Axboe 
4780e091d1SJens Axboe static ssize_t queue_var_store64(s64 *var, const char *page)
4887760e5eSJens Axboe {
4987760e5eSJens Axboe 	int err;
5080e091d1SJens Axboe 	s64 v;
5187760e5eSJens Axboe 
5280e091d1SJens Axboe 	err = kstrtos64(page, 10, &v);
5387760e5eSJens Axboe 	if (err < 0)
5487760e5eSJens Axboe 		return err;
5587760e5eSJens Axboe 
5687760e5eSJens Axboe 	*var = v;
5787760e5eSJens Axboe 	return 0;
5887760e5eSJens Axboe }
5987760e5eSJens Axboe 
608324aa91SJens Axboe static ssize_t queue_requests_show(struct request_queue *q, char *page)
618324aa91SJens Axboe {
628324aa91SJens Axboe 	return queue_var_show(q->nr_requests, (page));
638324aa91SJens Axboe }
648324aa91SJens Axboe 
658324aa91SJens Axboe static ssize_t
668324aa91SJens Axboe queue_requests_store(struct request_queue *q, const char *page, size_t count)
678324aa91SJens Axboe {
688324aa91SJens Axboe 	unsigned long nr;
69e3a2b3f9SJens Axboe 	int ret, err;
70b8a9ae77SJens Axboe 
71344e9ffcSJens Axboe 	if (!queue_is_mq(q))
72b8a9ae77SJens Axboe 		return -EINVAL;
73b8a9ae77SJens Axboe 
74b8a9ae77SJens Axboe 	ret = queue_var_store(&nr, page, count);
75b1f3b64dSDave Reisner 	if (ret < 0)
76b1f3b64dSDave Reisner 		return ret;
77b1f3b64dSDave Reisner 
788324aa91SJens Axboe 	if (nr < BLKDEV_MIN_RQ)
798324aa91SJens Axboe 		nr = BLKDEV_MIN_RQ;
808324aa91SJens Axboe 
81e3a2b3f9SJens Axboe 	err = blk_mq_update_nr_requests(q, nr);
82e3a2b3f9SJens Axboe 	if (err)
83e3a2b3f9SJens Axboe 		return err;
84a051661cSTejun Heo 
858324aa91SJens Axboe 	return ret;
868324aa91SJens Axboe }
878324aa91SJens Axboe 
888324aa91SJens Axboe static ssize_t queue_ra_show(struct request_queue *q, char *page)
898324aa91SJens Axboe {
90dc3b17ccSJan Kara 	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
9109cbfeafSKirill A. Shutemov 					(PAGE_SHIFT - 10);
928324aa91SJens Axboe 
938324aa91SJens Axboe 	return queue_var_show(ra_kb, (page));
948324aa91SJens Axboe }
958324aa91SJens Axboe 
968324aa91SJens Axboe static ssize_t
978324aa91SJens Axboe queue_ra_store(struct request_queue *q, const char *page, size_t count)
988324aa91SJens Axboe {
998324aa91SJens Axboe 	unsigned long ra_kb;
1008324aa91SJens Axboe 	ssize_t ret = queue_var_store(&ra_kb, page, count);
1018324aa91SJens Axboe 
102b1f3b64dSDave Reisner 	if (ret < 0)
103b1f3b64dSDave Reisner 		return ret;
104b1f3b64dSDave Reisner 
105dc3b17ccSJan Kara 	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
1068324aa91SJens Axboe 
1078324aa91SJens Axboe 	return ret;
1088324aa91SJens Axboe }
1098324aa91SJens Axboe 
/* "max_sectors_kb": current soft request-size cap (sectors / 2). */
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_sectors(q) >> 1, page);
}
1168324aa91SJens Axboe 
/* "max_segments": scatter-gather entry limit per request. */
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}
121c77a5710SMartin K. Petersen 
/* "max_discard_segments": segment limit for discard requests. */
static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}
1271e739730SChristoph Hellwig 
12813f05c8dSMartin K. Petersen static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
12913f05c8dSMartin K. Petersen {
13013f05c8dSMartin K. Petersen 	return queue_var_show(q->limits.max_integrity_segments, (page));
13113f05c8dSMartin K. Petersen }
13213f05c8dSMartin K. Petersen 
/* "max_segment_size": byte limit for a single scatter-gather entry. */
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}
137c77a5710SMartin K. Petersen 
/* "logical_block_size" (also "hw_sector_size" for compatibility). */
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}
142e68b903cSMartin K. Petersen 
/* "physical_block_size": smallest unit the device writes atomically. */
static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}
147c72758f3SMartin K. Petersen 
14887caf97cSHannes Reinecke static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
14987caf97cSHannes Reinecke {
15087caf97cSHannes Reinecke 	return queue_var_show(q->limits.chunk_sectors, page);
15187caf97cSHannes Reinecke }
15287caf97cSHannes Reinecke 
/* "minimum_io_size": preferred minimum I/O granularity. */
static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}
157c72758f3SMartin K. Petersen 
/* "optimal_io_size": device-preferred I/O size (0 if unreported). */
static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}
1628324aa91SJens Axboe 
16386b37281SMartin K. Petersen static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
16486b37281SMartin K. Petersen {
16586b37281SMartin K. Petersen 	return queue_var_show(q->limits.discard_granularity, page);
16686b37281SMartin K. Petersen }
16786b37281SMartin K. Petersen 
1680034af03SJens Axboe static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
1690034af03SJens Axboe {
1700034af03SJens Axboe 
17118f922d0SAlan 	return sprintf(page, "%llu\n",
17218f922d0SAlan 		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
1730034af03SJens Axboe }
1740034af03SJens Axboe 
17586b37281SMartin K. Petersen static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
17686b37281SMartin K. Petersen {
177a934a00aSMartin K. Petersen 	return sprintf(page, "%llu\n",
178a934a00aSMartin K. Petersen 		       (unsigned long long)q->limits.max_discard_sectors << 9);
17986b37281SMartin K. Petersen }
18086b37281SMartin K. Petersen 
/*
 * Write handler for "discard_max_bytes": administrative cap on the
 * size of a single discard.  The byte value must be aligned to the
 * discard granularity and is clamped to the hardware limit.
 */
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	/*
	 * Mask-based alignment test assumes discard_granularity is a
	 * power of two.  NOTE(review): a zero granularity would make
	 * the mask all-ones and reject everything -- confirm callers
	 * guarantee it is non-zero/pow2 when discard is supported.
	 */
	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	/* Bytes -> 512-byte sectors; reject values that overflow u32. */
	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	/* Never allow more than the device can actually do. */
	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
2030034af03SJens Axboe 
/* "discard_zeroes_data": deprecated, always reports 0. */
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", 0);
}
20898262f27SMartin K. Petersen 
2094363ac7cSMartin K. Petersen static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
2104363ac7cSMartin K. Petersen {
2114363ac7cSMartin K. Petersen 	return sprintf(page, "%llu\n",
2124363ac7cSMartin K. Petersen 		(unsigned long long)q->limits.max_write_same_sectors << 9);
2134363ac7cSMartin K. Petersen }
2144363ac7cSMartin K. Petersen 
215a6f0788eSChaitanya Kulkarni static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
216a6f0788eSChaitanya Kulkarni {
217a6f0788eSChaitanya Kulkarni 	return sprintf(page, "%llu\n",
218a6f0788eSChaitanya Kulkarni 		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
219a6f0788eSChaitanya Kulkarni }
2204363ac7cSMartin K. Petersen 
/*
 * Write handler for "max_sectors_kb": set the soft per-request size
 * cap.  The new value must lie between one page and the effective
 * hardware limit (max_hw_sectors, further bounded by max_dev_sectors
 * when that is non-zero).
 */
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	/*
	 * Update the limit and the matching readahead page budget
	 * together under the queue lock.
	 */
	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
2458324aa91SJens Axboe 
/* "max_hw_sectors_kb": hard hardware limit (read-only). */
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_hw_sectors(q) >> 1, page);
}
2528324aa91SJens Axboe 
/*
 * QUEUE_SYSFS_BIT_FNS - generate a show/store handler pair for a
 * boolean queue flag.  @name becomes part of the function names,
 * @flag is the QUEUE_FLAG_* suffix, and @neg inverts the exported
 * polarity (used by "rotational", the negation of QUEUE_FLAG_NONROT).
 */
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

/* Handlers for "rotational", "add_random" and "iostats". */
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
2831308835fSBartlomiej Zolnierkiewicz 
284797476b8SDamien Le Moal static ssize_t queue_zoned_show(struct request_queue *q, char *page)
285797476b8SDamien Le Moal {
286797476b8SDamien Le Moal 	switch (blk_queue_zoned_model(q)) {
287797476b8SDamien Le Moal 	case BLK_ZONED_HA:
288797476b8SDamien Le Moal 		return sprintf(page, "host-aware\n");
289797476b8SDamien Le Moal 	case BLK_ZONED_HM:
290797476b8SDamien Le Moal 		return sprintf(page, "host-managed\n");
291797476b8SDamien Le Moal 	default:
292797476b8SDamien Le Moal 		return sprintf(page, "none\n");
293797476b8SDamien Le Moal 	}
294797476b8SDamien Le Moal }
295797476b8SDamien Le Moal 
/* "nr_zones": number of zones on a zoned device (0 otherwise). */
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}
300965b652eSDamien Le Moal 
/* "nomerges": 2 = no merging at all, 1 = no extended merges, 0 = on. */
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	unsigned long nm = (blk_queue_nomerges(q) << 1) |
			    blk_queue_noxmerges(q);

	return queue_var_show(nm, page);
}
306ac9fafa1SAlan D. Brunelle 
307ac9fafa1SAlan D. Brunelle static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
308ac9fafa1SAlan D. Brunelle 				    size_t count)
309ac9fafa1SAlan D. Brunelle {
310ac9fafa1SAlan D. Brunelle 	unsigned long nm;
311ac9fafa1SAlan D. Brunelle 	ssize_t ret = queue_var_store(&nm, page, count);
312ac9fafa1SAlan D. Brunelle 
313b1f3b64dSDave Reisner 	if (ret < 0)
314b1f3b64dSDave Reisner 		return ret;
315b1f3b64dSDave Reisner 
31657d74df9SChristoph Hellwig 	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
31757d74df9SChristoph Hellwig 	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
318488991e2SAlan D. Brunelle 	if (nm == 2)
31957d74df9SChristoph Hellwig 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
320488991e2SAlan D. Brunelle 	else if (nm)
32157d74df9SChristoph Hellwig 		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
3221308835fSBartlomiej Zolnierkiewicz 
323ac9fafa1SAlan D. Brunelle 	return ret;
324ac9fafa1SAlan D. Brunelle }
325ac9fafa1SAlan D. Brunelle 
/*
 * "rq_affinity": completion CPU steering policy.  Reads back 0 (off),
 * 1 (SAME_COMP: complete near the submitter) or 2 (SAME_FORCE: force
 * the exact submitting CPU).  "set << force" yields exactly those
 * values because the store side never sets SAME_FORCE without
 * SAME_COMP (see queue_rq_affinity_store below).
 */
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
333c7c22e4dSJens Axboe 
/*
 * Write handler for "rq_affinity".  Accepts 0, 1 or 2 (see the show
 * side for the meaning); any other value leaves the flags untouched
 * but still consumes the write.  On non-SMP kernels the whole body is
 * compiled out and writes always fail with -EINVAL.
 */
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		/* Force completion on the exact submitting CPU. */
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		/* Complete in the submitter's group only. */
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
3588324aa91SJens Axboe 
35906426adfSJens Axboe static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
36006426adfSJens Axboe {
36164f1c21eSJens Axboe 	int val;
36264f1c21eSJens Axboe 
36364f1c21eSJens Axboe 	if (q->poll_nsec == -1)
36464f1c21eSJens Axboe 		val = -1;
36564f1c21eSJens Axboe 	else
36664f1c21eSJens Axboe 		val = q->poll_nsec / 1000;
36764f1c21eSJens Axboe 
36864f1c21eSJens Axboe 	return sprintf(page, "%d\n", val);
36906426adfSJens Axboe }
37006426adfSJens Axboe 
37106426adfSJens Axboe static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
37206426adfSJens Axboe 				size_t count)
37306426adfSJens Axboe {
37464f1c21eSJens Axboe 	int err, val;
37506426adfSJens Axboe 
37606426adfSJens Axboe 	if (!q->mq_ops || !q->mq_ops->poll)
37706426adfSJens Axboe 		return -EINVAL;
37806426adfSJens Axboe 
37964f1c21eSJens Axboe 	err = kstrtoint(page, 10, &val);
38064f1c21eSJens Axboe 	if (err < 0)
38164f1c21eSJens Axboe 		return err;
38206426adfSJens Axboe 
38364f1c21eSJens Axboe 	if (val == -1)
38464f1c21eSJens Axboe 		q->poll_nsec = -1;
38564f1c21eSJens Axboe 	else
38664f1c21eSJens Axboe 		q->poll_nsec = val * 1000;
38764f1c21eSJens Axboe 
38864f1c21eSJens Axboe 	return count;
38906426adfSJens Axboe }
39006426adfSJens Axboe 
39105229beeSJens Axboe static ssize_t queue_poll_show(struct request_queue *q, char *page)
39205229beeSJens Axboe {
39305229beeSJens Axboe 	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
39405229beeSJens Axboe }
39505229beeSJens Axboe 
39605229beeSJens Axboe static ssize_t queue_poll_store(struct request_queue *q, const char *page,
39705229beeSJens Axboe 				size_t count)
39805229beeSJens Axboe {
39905229beeSJens Axboe 	unsigned long poll_on;
40005229beeSJens Axboe 	ssize_t ret;
40105229beeSJens Axboe 
402cd19181bSMing Lei 	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
403cd19181bSMing Lei 	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
40405229beeSJens Axboe 		return -EINVAL;
40505229beeSJens Axboe 
40605229beeSJens Axboe 	ret = queue_var_store(&poll_on, page, count);
40705229beeSJens Axboe 	if (ret < 0)
40805229beeSJens Axboe 		return ret;
40905229beeSJens Axboe 
41005229beeSJens Axboe 	if (poll_on)
4118814ce8aSBart Van Assche 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
41205229beeSJens Axboe 	else
4138814ce8aSBart Van Assche 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
41405229beeSJens Axboe 
41505229beeSJens Axboe 	return ret;
41605229beeSJens Axboe }
41705229beeSJens Axboe 
41865cd1d13SWeiping Zhang static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
41965cd1d13SWeiping Zhang {
42065cd1d13SWeiping Zhang 	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
42165cd1d13SWeiping Zhang }
42265cd1d13SWeiping Zhang 
42365cd1d13SWeiping Zhang static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
42465cd1d13SWeiping Zhang 				  size_t count)
42565cd1d13SWeiping Zhang {
42665cd1d13SWeiping Zhang 	unsigned int val;
42765cd1d13SWeiping Zhang 	int err;
42865cd1d13SWeiping Zhang 
42965cd1d13SWeiping Zhang 	err = kstrtou32(page, 10, &val);
43065cd1d13SWeiping Zhang 	if (err || val == 0)
43165cd1d13SWeiping Zhang 		return -EINVAL;
43265cd1d13SWeiping Zhang 
43365cd1d13SWeiping Zhang 	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
43465cd1d13SWeiping Zhang 
43565cd1d13SWeiping Zhang 	return count;
43665cd1d13SWeiping Zhang }
43765cd1d13SWeiping Zhang 
/*
 * "wbt_lat_usec": current writeback-throttling latency target.
 * -EINVAL when wbt is not attached to this queue.
 */
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	/* Internal bookkeeping is in nsecs; sysfs speaks usecs. */
	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
44587760e5eSJens Axboe 
/*
 * Write handler for "wbt_lat_usec": set the writeback-throttling
 * latency target.  -1 restores the device-type default; non-negative
 * values are taken in usecs (0 is passed through as a zero target --
 * presumably disabling throttling, confirm against blk-wbt).
 */
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	/* Lazily set up wbt the first time a target is written. */
	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	/* Sysfs speaks usecs; wbt works in nsecs. */
	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	/* Nothing to do if the target is unchanged. */
	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
49087760e5eSJens Axboe 
49193e9d8e8SJens Axboe static ssize_t queue_wc_show(struct request_queue *q, char *page)
49293e9d8e8SJens Axboe {
49393e9d8e8SJens Axboe 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
49493e9d8e8SJens Axboe 		return sprintf(page, "write back\n");
49593e9d8e8SJens Axboe 
49693e9d8e8SJens Axboe 	return sprintf(page, "write through\n");
49793e9d8e8SJens Axboe }
49893e9d8e8SJens Axboe 
49993e9d8e8SJens Axboe static ssize_t queue_wc_store(struct request_queue *q, const char *page,
50093e9d8e8SJens Axboe 			      size_t count)
50193e9d8e8SJens Axboe {
50293e9d8e8SJens Axboe 	int set = -1;
50393e9d8e8SJens Axboe 
50493e9d8e8SJens Axboe 	if (!strncmp(page, "write back", 10))
50593e9d8e8SJens Axboe 		set = 1;
50693e9d8e8SJens Axboe 	else if (!strncmp(page, "write through", 13) ||
50793e9d8e8SJens Axboe 		 !strncmp(page, "none", 4))
50893e9d8e8SJens Axboe 		set = 0;
50993e9d8e8SJens Axboe 
51093e9d8e8SJens Axboe 	if (set == -1)
51193e9d8e8SJens Axboe 		return -EINVAL;
51293e9d8e8SJens Axboe 
51393e9d8e8SJens Axboe 	if (set)
5148814ce8aSBart Van Assche 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
51593e9d8e8SJens Axboe 	else
5168814ce8aSBart Van Assche 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
51793e9d8e8SJens Axboe 
51893e9d8e8SJens Axboe 	return count;
51993e9d8e8SJens Axboe }
52093e9d8e8SJens Axboe 
5216fcefbe5SKent Overstreet static ssize_t queue_fua_show(struct request_queue *q, char *page)
5226fcefbe5SKent Overstreet {
5236fcefbe5SKent Overstreet 	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
5246fcefbe5SKent Overstreet }
5256fcefbe5SKent Overstreet 
/* "dax": whether the queue supports direct access (DAX). */
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
530ea6ca600SYigal Korman 
/*
 * Attribute descriptors: each one wires a file name under
 * /sys/block/<dev>/queue to its mode and show/store handlers.
 */
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

/* Read-only: the hardware limit cannot be raised from userspace. */
static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};
5538324aa91SJens Axboe 
/* Read-only segment/segment-size limits reported by the driver. */
static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};
573c77a5710SMartin K. Petersen 
/* Elevator selection plus read-only geometry/limit attributes. */
static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

/* "hw_sector_size" is a legacy alias for the logical block size. */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};
6098324aa91SJens Axboe 
/* Discard / write-same / write-zeroes attributes. */
static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

/* Writable: admins may lower the discard cap below the HW limit. */
static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};
640a6f0788eSChaitanya Kulkarni 
/* Rotational/zoned/merge/affinity/stat attributes. */
static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};
68105229beeSJens Axboe static struct queue_sysfs_entry queue_poll_entry = {
6825657a819SJoe Perches 	.attr = {.name = "io_poll", .mode = 0644 },
68305229beeSJens Axboe 	.show = queue_poll_show,
68405229beeSJens Axboe 	.store = queue_poll_store,
68505229beeSJens Axboe };
68605229beeSJens Axboe 
68706426adfSJens Axboe static struct queue_sysfs_entry queue_poll_delay_entry = {
6885657a819SJoe Perches 	.attr = {.name = "io_poll_delay", .mode = 0644 },
68906426adfSJens Axboe 	.show = queue_poll_delay_show,
69006426adfSJens Axboe 	.store = queue_poll_delay_store,
69106426adfSJens Axboe };
69206426adfSJens Axboe 
69393e9d8e8SJens Axboe static struct queue_sysfs_entry queue_wc_entry = {
6945657a819SJoe Perches 	.attr = {.name = "write_cache", .mode = 0644 },
69593e9d8e8SJens Axboe 	.show = queue_wc_show,
69693e9d8e8SJens Axboe 	.store = queue_wc_store,
69793e9d8e8SJens Axboe };
69893e9d8e8SJens Axboe 
6996fcefbe5SKent Overstreet static struct queue_sysfs_entry queue_fua_entry = {
7005657a819SJoe Perches 	.attr = {.name = "fua", .mode = 0444 },
7016fcefbe5SKent Overstreet 	.show = queue_fua_show,
7026fcefbe5SKent Overstreet };
7036fcefbe5SKent Overstreet 
704ea6ca600SYigal Korman static struct queue_sysfs_entry queue_dax_entry = {
7055657a819SJoe Perches 	.attr = {.name = "dax", .mode = 0444 },
706ea6ca600SYigal Korman 	.show = queue_dax_show,
707ea6ca600SYigal Korman };
708ea6ca600SYigal Korman 
70965cd1d13SWeiping Zhang static struct queue_sysfs_entry queue_io_timeout_entry = {
71065cd1d13SWeiping Zhang 	.attr = {.name = "io_timeout", .mode = 0644 },
71165cd1d13SWeiping Zhang 	.show = queue_io_timeout_show,
71265cd1d13SWeiping Zhang 	.store = queue_io_timeout_store,
71365cd1d13SWeiping Zhang };
71465cd1d13SWeiping Zhang 
71587760e5eSJens Axboe static struct queue_sysfs_entry queue_wb_lat_entry = {
7165657a819SJoe Perches 	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
71787760e5eSJens Axboe 	.show = queue_wb_lat_show,
71887760e5eSJens Axboe 	.store = queue_wb_lat_store,
71987760e5eSJens Axboe };
72087760e5eSJens Axboe 
721297e3d85SShaohua Li #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
722297e3d85SShaohua Li static struct queue_sysfs_entry throtl_sample_time_entry = {
7235657a819SJoe Perches 	.attr = {.name = "throttle_sample_time", .mode = 0644 },
724297e3d85SShaohua Li 	.show = blk_throtl_sample_time_show,
725297e3d85SShaohua Li 	.store = blk_throtl_sample_time_store,
726297e3d85SShaohua Li };
727297e3d85SShaohua Li #endif
728297e3d85SShaohua Li 
/*
 * NULL-terminated list of all queue sysfs attributes.  Hooked up as the
 * default attributes of blk_queue_ktype below, so every entry here is
 * created automatically when the "queue" kobject is added in
 * blk_register_queue().
 */
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};
7708324aa91SJens Axboe 
7718324aa91SJens Axboe #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
7728324aa91SJens Axboe 
7738324aa91SJens Axboe static ssize_t
7748324aa91SJens Axboe queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
7758324aa91SJens Axboe {
7768324aa91SJens Axboe 	struct queue_sysfs_entry *entry = to_queue(attr);
7778324aa91SJens Axboe 	struct request_queue *q =
7788324aa91SJens Axboe 		container_of(kobj, struct request_queue, kobj);
7798324aa91SJens Axboe 	ssize_t res;
7808324aa91SJens Axboe 
7818324aa91SJens Axboe 	if (!entry->show)
7828324aa91SJens Axboe 		return -EIO;
7838324aa91SJens Axboe 	mutex_lock(&q->sysfs_lock);
7843f3299d5SBart Van Assche 	if (blk_queue_dying(q)) {
7858324aa91SJens Axboe 		mutex_unlock(&q->sysfs_lock);
7868324aa91SJens Axboe 		return -ENOENT;
7878324aa91SJens Axboe 	}
7888324aa91SJens Axboe 	res = entry->show(q, page);
7898324aa91SJens Axboe 	mutex_unlock(&q->sysfs_lock);
7908324aa91SJens Axboe 	return res;
7918324aa91SJens Axboe }
7928324aa91SJens Axboe 
7938324aa91SJens Axboe static ssize_t
7948324aa91SJens Axboe queue_attr_store(struct kobject *kobj, struct attribute *attr,
7958324aa91SJens Axboe 		    const char *page, size_t length)
7968324aa91SJens Axboe {
7978324aa91SJens Axboe 	struct queue_sysfs_entry *entry = to_queue(attr);
7986728cb0eSJens Axboe 	struct request_queue *q;
7998324aa91SJens Axboe 	ssize_t res;
8008324aa91SJens Axboe 
8018324aa91SJens Axboe 	if (!entry->store)
8028324aa91SJens Axboe 		return -EIO;
8036728cb0eSJens Axboe 
8046728cb0eSJens Axboe 	q = container_of(kobj, struct request_queue, kobj);
8058324aa91SJens Axboe 	mutex_lock(&q->sysfs_lock);
8063f3299d5SBart Van Assche 	if (blk_queue_dying(q)) {
8078324aa91SJens Axboe 		mutex_unlock(&q->sysfs_lock);
8088324aa91SJens Axboe 		return -ENOENT;
8098324aa91SJens Axboe 	}
8108324aa91SJens Axboe 	res = entry->store(q, page, length);
8118324aa91SJens Axboe 	mutex_unlock(&q->sysfs_lock);
8128324aa91SJens Axboe 	return res;
8138324aa91SJens Axboe }
8148324aa91SJens Axboe 
815548bc8e1STejun Heo static void blk_free_queue_rcu(struct rcu_head *rcu_head)
816548bc8e1STejun Heo {
817548bc8e1STejun Heo 	struct request_queue *q = container_of(rcu_head, struct request_queue,
818548bc8e1STejun Heo 					       rcu_head);
819548bc8e1STejun Heo 	kmem_cache_free(blk_requestq_cachep, q);
820548bc8e1STejun Heo }
821548bc8e1STejun Heo 
/**
 * __blk_release_queue - release a request queue
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     This function is called when a block device is being unregistered. The
 *     process of releasing a request queue starts with blk_cleanup_queue(),
 *     which sets the appropriate flags and then calls blk_put_queue(), which
 *     decrements the reference counter of the request queue. Once the
 *     reference counter of the request queue reaches zero, blk_release_queue()
 *     is called to release all allocated resources of the request queue.
 *
 *     The teardown order below is deliberate; do not reorder the calls
 *     without auditing the dependencies between them.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	/* Detach the poll-stats callback (if armed) before freeing it. */
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	if (!blk_queue_dead(q)) {
		/*
		 * Last reference was dropped without having called
		 * blk_cleanup_queue().
		 */
		WARN_ONCE(blk_queue_init_done(q),
			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
			  q);
		/* Run the teardown blk_cleanup_queue() would have done. */
		blk_exit_queue(q);
	}

	WARN(blk_queue_root_blkg(q),
	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
	     q);

	blk_free_queue_stats(q->stats);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	/* Give back the queue id, then free the structure after a grace period. */
	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
8748324aa91SJens Axboe 
/*
 * kobject ->release hook for the queue kobject.  Defers the actual
 * teardown to __blk_release_queue() via the system workqueue —
 * presumably because the final kobject_put() may happen from a context
 * unsuitable for the blocking teardown work; NOTE(review): confirm
 * against callers before relying on this rationale.
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}
883dc9edc44SBart Van Assche 
/* sysfs show/store dispatch for all attributes of the queue kobject. */
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

/*
 * kobject type for request_queue::kobj: wires up the dispatchers above,
 * the default attribute list, and the deferred release path.
 */
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
8948324aa91SJens Axboe 
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 *
 * Creates the "queue" kobject under the disk's device, registers blk-mq
 * sysfs/debugfs entries when applicable, enables default writeback
 * throttling, and registers the elevator's sysfs entries if a scheduler
 * is attached.
 *
 * Return: 0 on success, negative errno on failure.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	/*
	 * NOTE(review): the REGISTERED flag is set here but never cleared
	 * on the failure paths below, so a failed registration leaves the
	 * queue looking registered — verify whether callers can retry.
	 */
	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measureable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	/* Takes a reference on dev->kobj; dropped in blk_unregister_queue(). */
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->elevator) {
		ret = elv_register_queue(q);
		if (ret) {
			/* Unwind the kobject work done above by hand. */
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
9688324aa91SJens Axboe 
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	/*
	 * sysfs_lock is deliberately dropped here and re-acquired below;
	 * the kobject removal in between must not run under it.
	 */
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	/* Drop the reference taken by kobject_get() in blk_register_queue(). */
	kobject_put(&disk_to_dev(disk)->kobj);
}
1015