// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
	void (*load_module)(struct gendisk *disk, const char *page, size_t count);
};
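
/*
 * An attribute implements at most one of the store variants: plain
 * ->store() runs with sysfs_lock held and the queue frozen, while
 * ->store_limit() stages its change in a local queue_limits copy that
 * queue_attr_store() commits atomically.  ->load_module(), if set, runs
 * first, before the queue is frozen (see queue_attr_store() below).
 */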

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
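
/*
 * queue_var_store() parses into an unsigned long but rejects anything
 * above UINT_MAX: callers typically hand the value on to interfaces
 * taking a 32-bit quantity (blk_mq_update_nr_requests(), for example),
 * so larger input would otherwise be silently truncated.
 */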

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->queue->nr_requests, page);
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(disk->queue))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
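
/*
 * ra_pages counts PAGE_SIZE units while the sysfs file is in KiB,
 * hence the PAGE_SHIFT - 10 shifts above.  E.g. with 4 KiB pages
 * (PAGE_SHIFT == 12), 32 ra_pages shows as 32 << 2 == 128 in
 * read_ahead_kb, and writing 128 stores 128 >> 2 == 32 pages again.
 */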

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}
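
/*
 * For instance, the first invocation below,
 * QUEUE_SYSFS_LIMIT_SHOW(max_segments), expands to:
 *
 *	static ssize_t queue_max_segments_show(struct gendisk *disk,
 *			char *page)
 *	{
 *		return queue_var_show(disk->queue->limits.max_segments, page);
 *	}
 */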

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}
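
/*
 * Note that the granularity check above uses a mask rather than a
 * modulo, which is only equivalent when discard_granularity is a power
 * of two.  E.g. with a 4096-byte granularity, writing 6144 to
 * discard_max_bytes fails (6144 & 4095 == 2048) while 8192 succeeds.
 */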

static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}
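
/*
 * Each invocation below thus defines a show/store pair; e.g.
 * QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL) yields
 * queue_rotational_show() and queue_rotational_store(), which are wired
 * to the "rotational" file by QUEUE_LIM_RW_ENTRY() further down.
 */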

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA)
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX)

static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
	return sysfs_emit(page, "%u\n",
		!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static int queue_iostats_passthrough_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	if (ios)
		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
	return 0;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);

	return ret;
}
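
/*
 * The value written to (and read from) nomerges therefore means:
 *
 *	0 - all merging enabled (both flags clear)
 *	1 - only simple one-hit merges attempted (QUEUE_FLAG_NOXMERGES)
 *	2 - no merging attempted at all (QUEUE_FLAG_NOMERGES)
 */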

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
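
/*
 * rq_affinity accordingly accepts:
 *
 *	0 - complete on whatever CPU the request finished on
 *	1 - complete on the submitting CPU or one sharing its cache
 *	    (QUEUE_FLAG_SAME_COMP)
 *	2 - force completion on the exact submitting CPU
 *	    (QUEUE_FLAG_SAME_FORCE)
 */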

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	if (!(disk->queue->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show		= _prefix##_show,			\
	.store_limit	= _prefix##_store,			\
}

#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name)		\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show		= _prefix##_show,			\
	.load_module	= _prefix##_load_module,		\
	.store		= _prefix##_store,			\
}
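
/*
 * As an example, QUEUE_RW_ENTRY(queue_requests, "nr_requests") below
 * expands to:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 */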

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	if (!wbt_rq_qos(disk->queue))
		return -EINVAL;

	if (wbt_disabled(disk->queue))
		return sysfs_emit(page, "0\n");

	return sysfs_emit(page, "%llu\n",
		div_u64(wbt_get_min_lat(disk->queue), 1000));
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}
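
/*
 * To summarize the wbt_lat_usec cases handled above: -1 re-installs the
 * default latency for the device type, 0 effectively disables
 * throttling (reads then show 0), and a positive value sets the target
 * latency in microseconds, stored internally in nanoseconds.
 */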

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&disk->queue->sysfs_lock);
	res = entry->show(disk, page);
	mutex_unlock(&disk->queue->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	unsigned int noio_flag;
	ssize_t res;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	/*
	 * If the attribute needs to load a module, do it before freezing the
	 * queue: the module may have to be read from the very device whose
	 * queue is being frozen, and that read would deadlock against the
	 * frozen queue.
	 */
	if (entry->load_module)
		entry->load_module(disk, page, length);

	if (entry->store_limit) {
		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	mutex_lock(&q->sysfs_lock);
	blk_mq_freeze_queue(q);
	noio_flag = memalloc_noio_save();
	res = entry->store(disk, page, length);
	memalloc_noio_restore(noio_flag);
	blk_mq_unfreeze_queue(q);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
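
/*
 * Seen from userspace both store paths look identical; illustrative
 * examples (device names vary):
 *
 *	# echo 512 > /sys/block/sda/queue/max_sectors_kb    (store_limit path)
 *	# echo 2 > /sys/block/sda/queue/nomerges            (store path)
 */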

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here, as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
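
/*
 * blk_register_queue() is normally reached through device_add_disk()
 * rather than called directly by drivers.  A rough sketch of the
 * driver-side sequence (names illustrative, details vary by driver and
 * kernel version):
 *
 *	disk = blk_mq_alloc_disk(&tag_set, &lim, driver_data);
 *	snprintf(disk->disk_name, sizeof(disk->disk_name), "myblk0");
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);	   (ends up here via device_add_disk())
 */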

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
885