// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 *
 * (block/blk-sysfs.c at revision 5ddb88f22eb97218d9295e69c39e0ff7cc64e09c)
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
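
/*
 * Each queue_sysfs_entry below becomes a file under
 * /sys/block/<disk>/queue/, with ->show() backing reads and ->store()
 * backing writes; a missing callback makes the corresponding access fail
 * with -EIO (see queue_attr_show()/queue_attr_store() further down).
 * A rough usage sketch from userspace, with "sda" and the value shown
 * being purely illustrative:
 *
 *	# cat /sys/block/sda/queue/max_sectors_kb
 *	1280
 *	# echo 512 > /sys/block/sda/queue/max_sectors_kb
 */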

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
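
/*
 * The queue_var_store() contract, by example: a write of "128\n" parses
 * to 128 and the full byte count is returned so sysfs treats the write
 * as consumed. Non-numeric input, or a value above UINT_MAX, yields
 * -EINVAL even on 64-bit kernels where unsigned long could hold more.
 */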

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
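
/*
 * Illustrative use (disk name hypothetical): values below BLKDEV_MIN_RQ
 * are silently rounded up rather than rejected, and bio-based queues
 * refuse the write outright with -EINVAL since they have no request pool
 * to resize:
 *
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 */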

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
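
/*
 * Worked example of the conversion above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12, so the shift amount is 2): writing 128 to
 * read_ahead_kb stores 128 >> 2 == 32 in bdi->ra_pages, and reading it
 * back shows 32 << 2 == 128.
 */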

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}
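
/*
 * The << 9 above converts 512-byte sectors to bytes; e.g. a device
 * reporting max_hw_discard_sectors == 8388608 shows up as 4294967296
 * (4 GiB) in discard_max_hw_bytes.
 */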

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard_bytes;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (q->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim = queue_limits_start_update(q);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	err = queue_limits_commit_update(q, &lim);
	if (err)
		return err;
	return ret;
}
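
/*
 * Example of the validation above, assuming a discard_granularity of
 * 4096: the AND against (granularity - 1) is a power-of-two alignment
 * test, so "echo 1048576 > .../discard_max_bytes" is accepted while
 * "echo 1048577" fails with -EINVAL, as does any value whose sector
 * count no longer fits in 32 bits after the SECTOR_SHIFT conversion.
 */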

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = queue_max_zone_append_sectors(q);

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(q);
	lim.max_user_sectors = max_sectors_kb << 1;
	err = queue_limits_commit_update(q, &lim);
	if (err)
		return err;
	return ret;
}
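
/*
 * The << 1 above converts kilobytes to 512-byte sectors: writing 512 to
 * max_sectors_kb requests max_user_sectors == 1024, which
 * queue_limits_commit_update() then caps against the hardware limit
 * before it takes effect.
 */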

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

static ssize_t queue_feature_store(struct request_queue *q, const char *page,
		size_t count, unsigned int feature)
{
	struct queue_limits lim;
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(q);
	if (val)
		lim.features |= feature;
	else
		lim.features &= ~feature;
	ret = queue_limits_commit_update(q, &lim);
	if (ret)
		return ret;
	return count;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				 \
static ssize_t queue_##_name##_show(struct request_queue *q, char *page) \
{									 \
	return sprintf(page, "%u\n", !!(q->limits.features & _feature)); \
}									 \
static ssize_t queue_##_name##_store(struct request_queue *q,		 \
		const char *page, size_t count)				 \
{									 \
	return queue_feature_store(q, page, count, _feature);		 \
}

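/*
 * For illustration, QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
 * expands to roughly:
 *
 *	static ssize_t queue_rotational_show(struct request_queue *q,
 *			char *page)
 *	{
 *		return sprintf(page, "%u\n",
 *			!!(q->limits.features & BLK_FEAT_ROTATIONAL));
 *	}
 *	static ssize_t queue_rotational_store(struct request_queue *q,
 *			const char *page, size_t count)
 *	{
 *		return queue_feature_store(q, page, count,
 *				BLK_FEAT_ROTATIONAL);
 *	}
 */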
QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	if (blk_queue_is_zoned(q))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
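
/*
 * The nomerges encoding, by example: 0 re-enables all merging, 1
 * (QUEUE_FLAG_NOXMERGES) disables only the more expensive extended merge
 * lookups, and 2 (QUEUE_FLAG_NOMERGES) disables merging entirely, e.g.
 * "echo 2 > /sys/block/<disk>/queue/nomerges". The show side packs the
 * two flags back into the same 0/1/2 value.
 */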

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
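
/*
 * rq_affinity semantics, as encoded by the flag pairs above: 0 allows
 * completion on any CPU, 1 (QUEUE_FLAG_SAME_COMP) steers completions to
 * the submitting CPU's group, and 2 additionally sets
 * QUEUE_FLAG_SAME_FORCE so completions run on the exact submitting CPU.
 * The show side reconstructs the value as set << force, giving 0, 1 or 2.
 */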

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!(q->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
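
/*
 * Example: "echo 30000 > /sys/block/<disk>/queue/io_timeout" arms a 30
 * second request timeout. The value is in milliseconds; zero is rejected
 * since a request must always be allowed some time to complete.
 */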

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (q->limits.flags & BLK_FLAGS_WRITE_CACHE_DISABLED)
		return sprintf(page, "write through\n");
	return sprintf(page, "write back\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	struct queue_limits lim;
	bool disable;
	int err;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	lim = queue_limits_start_update(q);
	if (disable)
		lim.flags |= BLK_FLAGS_WRITE_CACHE_DISABLED;
	else
		lim.flags &= ~BLK_FLAGS_WRITE_CACHE_DISABLED;
	err = queue_limits_commit_update(q, &lim);
	if (err)
		return err;
	return count;
}
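
/*
 * Accepted write_cache values, per the string matching above: "write
 * back" enables write-back caching, while "write through" (or "none")
 * disables it via BLK_FLAGS_WRITE_CACHE_DISABLED, e.g.:
 *
 *	# echo "write through" > /sys/block/<disk>/queue/write_cache
 */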

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", !!(q->limits.features & BLK_FEAT_FUA));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}
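
/*
 * wbt_lat_usec accepts three kinds of values, mirroring the branches
 * above: -1 restores the device-type default latency target, 0 disables
 * writeback throttling, and a positive value sets the target directly in
 * microseconds (stored internally in nanoseconds, hence the * 1000):
 *
 *	# echo 75 > /sys/block/<disk>/queue/wbt_lat_usec
 */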

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}
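
/*
 * Both callbacks above implement the attribute_group ->is_visible() hook:
 * returning 0 hides a file entirely while returning attr->mode keeps it
 * with its declared permissions. The net effect, by example: on a
 * non-zoned disk /sys/block/<disk>/queue/max_open_zones does not exist,
 * and on a bio-based queue none of the blk-mq-only files (nr_requests,
 * scheduler, ...) are created.
 */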

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}
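
/*
 * Note the ordering above: the queue is frozen before ->store() runs, so
 * every attribute update executes with no I/O in flight, and sysfs_lock
 * serializes concurrent attribute accesses against each other and against
 * (un)registration. Reads take only sysfs_lock, since showing a value
 * does not require a quiesced queue.
 */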

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
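 *
 * Normally invoked from the add_disk() path rather than directly by
 * drivers. Returns zero on success or a negative errno on failure.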
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
905