xref: /linux/block/blk-sysfs.c (revision 6fc75f309d291d328b4ea2f91bef0ff56e4bc7c2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

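/*
 * This pair backs /sys/block/<disk>/queue/nr_requests, e.g. (assuming a
 * hypothetical disk "sda"):
 *
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Only blk-mq queues accept the write; values below BLKDEV_MIN_RQ are
 * silently raised to the minimum rather than rejected.
 */
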
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

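/*
 * The shifts above convert between kilobytes and pages.  Assuming the
 * common PAGE_SHIFT of 12 (4 KiB pages), PAGE_SHIFT - 10 == 2, so
 * ra_pages = 32 is shown as read_ahead_kb = 128, and writing 128 stores
 * 32 pages again.
 */
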
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

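/*
 * discard_max_bytes is exposed in bytes but kept internally in 512-byte
 * sectors, hence the << 9 on show and >> 9 on store.  The mask test
 * against discard_granularity - 1 is the usual power-of-two alignment
 * idiom, so it only behaves as intended when the granularity is a power
 * of two; unaligned writes fail with -EINVAL, while values above the
 * hardware limit are clamped rather than rejected.
 */
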
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

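/*
 * A write to max_sectors_kb must fall between one page and the hardware
 * (or device) limit: with 4 KiB pages and max_hw_sectors_kb = 1024 the
 * accepted range is [4, 1024].  The << 1 and >> 1 conversions translate
 * between KiB and 512-byte sectors.
 */
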
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

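/*
 * Each invocation above expands to a show/store pair.  As a sketch,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) produces roughly:
 *
 *	static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *	{
 *		return queue_var_show(!test_bit(QUEUE_FLAG_NONROT,
 *						&q->queue_flags), page);
 *	}
 *
 * The neg = 1 inversion is what makes the "rotational" attribute read 0
 * for a queue flagged NONROT.
 */
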
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

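/*
 * Both nomerges and rq_affinity pack two queue flags into one small
 * integer: nomerges reads 0 (merging enabled), 1 (only simple one-hit
 * merges) or 2 (no merging at all); rq_affinity reads 0 (no completion
 * steering), 1 (complete in the submitter's CPU group) or 2 (force
 * completion on the exact submitting CPU).
 */
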
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

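/*
 * wbt_lat_usec is exposed in microseconds but stored in nanoseconds,
 * hence div_u64(..., 1000) on show and the * 1000 on store.  Writing -1
 * re-arms the device-type default, and writing 0 effectively disables
 * write-back throttling.  For example (assuming a hypothetical disk
 * "sda"):
 *
 *	# echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */
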
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

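/*
 * QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") below, for instance,
 * expands to:
 *
 *	static struct queue_sysfs_entry queue_ra_entry = {
 *		.attr	= { .name = "read_ahead_kb", .mode = 0644 },
 *		.show	= queue_ra_show,
 *		.store	= queue_ra_store,
 *	};
 *
 * The _prefix argument must match the show/store functions defined
 * above, and _name is the file name that appears under
 * /sys/block/<disk>/queue/.
 */
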
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
			return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

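/*
 * The is_visible() hook makes absence meaningful: io_timeout only
 * appears for blk-mq drivers that implement a timeout handler, and
 * max_open_zones/max_active_zones only appear on zoned queues.
 * Returning 0 hides the attribute entirely instead of exposing a file
 * that would always fail.
 */
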
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	kmem_cache_free(blk_requestq_cachep,
			container_of(rcu_head, struct request_queue, rcu_head));
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous, so it must not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	percpu_ref_exit(&q->q_usage_counter);

	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);
	kfree(q->poll_stat);

	if (queue_is_mq(q))
		blk_mq_release(q);

	ida_free(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};

struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto unlock;

	if (queue_is_mq(q))
		blk_mq_sysfs_register(disk);
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto put_dev;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}