// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

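/*
 * Each attribute exposed under /sys/block/<disk>/queue/ is described by one
 * of these entries: ->show formats the current value into @page, ->store
 * parses a new value from user space.  Either callback may be NULL, in
 * which case reads or writes of that attribute fail with -EIO (see
 * queue_attr_show()/queue_attr_store() below).
 */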
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

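/*
 * Helpers for the common case of an attribute backed by an unsigned long.
 * queue_var_store() rejects values above UINT_MAX and, as usual for sysfs
 * ->store callbacks, returns the number of bytes consumed on success.
 */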
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

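/*
 * Resize the request (tag) depth of a blk-mq queue; bio-based queues have
 * no request pool, hence the -EINVAL.  Values below BLKDEV_MIN_RQ are
 * rounded up.  Typical usage from user space (device name illustrative):
 *
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 */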
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

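/*
 * Readahead is tracked in pages (bdi->ra_pages) but exposed in kilobytes,
 * so read_ahead_kb converts with a shift of PAGE_SHIFT - 10: with 4 KiB
 * pages that is 2, e.g. 32 pages read back as 128 KiB.
 */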
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

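/*
 * Lower the advertised discard limit.  The value is in bytes, must be
 * aligned to the discard granularity (the mask test assumes a power-of-two
 * granularity) and must fit in UINT_MAX sectors.  It is applied as
 * max_user_discard_sectors via the atomic queue-limits update machinery
 * while the queue is frozen, so in-flight I/O never sees a half-updated
 * set of limits.
 */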
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard_bytes;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (q->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	blk_mq_freeze_queue(q);
	lim = queue_limits_start_update(q);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);

	if (err)
		return err;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = queue_max_zone_append_sectors(q);

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

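/*
 * Set the user cap on request size.  The value is supplied in kilobytes
 * and stored as max_user_sectors in 512-byte sectors, hence the << 1;
 * the same frozen-queue limits update as for discard_max_bytes applies.
 */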
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	blk_mq_freeze_queue(q);
	lim = queue_limits_start_update(q);
	lim.max_user_sectors = max_sectors_kb << 1;
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	if (err)
		return err;
	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

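/*
 * Boolean attributes backed by a queue flag.  The third macro argument
 * negates the value, so "rotational" reports and stores the inverse of
 * QUEUE_FLAG_NONROT: writing 0 to rotational sets the NONROT flag.
 */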
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	if (blk_queue_is_zoned(q))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

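/*
 * "nomerges" folds two flags into a single value: 2 disables all merging
 * (QUEUE_FLAG_NOMERGES), 1 disables only the more expensive extended
 * merge lookups (QUEUE_FLAG_NOXMERGES), 0 leaves merging fully enabled.
 */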
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

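/*
 * "rq_affinity" is encoded the same way: 0 completes requests wherever
 * the interrupt lands, 1 (SAME_COMP) steers completions to the submitting
 * CPU's group, 2 (SAME_COMP + SAME_FORCE) forces completion on the exact
 * submitting CPU.  The show side relies on SAME_FORCE never being set
 * without SAME_COMP, so "set << force" yields exactly 0, 1 or 2.
 */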
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

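/*
 * "write_cache" reports "write back" or "write through".  Switching to
 * write back is only allowed if the device really has a volatile write
 * cache (QUEUE_FLAG_HW_WC).  Note that the strncmp() prefix matching also
 * accepts input that merely starts with one of the keywords.
 */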
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	if (!strncmp(page, "write back", 10)) {
		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
			return -EINVAL;
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4)) {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		return -EINVAL;
	}

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

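/*
 * Attribute definitions.  Each macro ties a sysfs file name to the
 * show/store handlers derived from the prefix, e.g.
 * QUEUE_RW_ENTRY(queue_requests, "nr_requests") binds the "nr_requests"
 * file to queue_requests_show() and queue_requests_store().
 */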
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

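/*
 * "wbt_lat_usec" takes the writeback throttling latency target in
 * microseconds: -1 restores the device default, 0 disables throttling,
 * anything else is converted to nanoseconds for wbt.  Applying a change
 * requires freezing and quiescing the queue, see below.
 */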
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

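/*
 * Visibility callbacks: the zone limit attributes are hidden on
 * non-zoned devices, and the whole blk-mq group (plus io_timeout when the
 * driver lacks a ->timeout handler) is hidden on bio-based queues.
 */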
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

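/*
 * All ->show and ->store callbacks run under q->sysfs_lock, which
 * serializes them against one another and against registration and
 * unregistration of the queue.
 */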
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
895