// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

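/*
 * Parse a decimal sysfs input.  Anything kstrtoul() rejects, and any value
 * above UINT_MAX, fails with -EINVAL; on success the full write length is
 * returned so ->store() handlers can hand it straight back to sysfs.
 */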
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->queue->nr_requests, page);
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(disk->queue))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		return err;

	return ret;
}
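/*
 * Illustrative example ("sda" is a placeholder device name):
 *
 *	# echo 512 > /sys/block/sda/queue/nr_requests
 *
 * ends up in queue_requests_store() above, which raises the value to at
 * least BLKDEV_MIN_RQ and lets blk_mq_update_nr_requests() apply it.
 */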

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
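/*
 * ra_pages is kept in PAGE_SIZE units, so the shifts by (PAGE_SHIFT - 10)
 * convert between KiB and pages: with 4 KiB pages that is a shift of 2,
 * e.g. a 128 KiB readahead window is stored as 32 pages.
 */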

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%llu\n",					\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}
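/*
 * Queue limits are kept internally in 512-byte sectors; shifting left by
 * SECTOR_SHIFT (9) reports them to userspace in bytes, while the
 * _SECTORS_TO_KB variant below shifts right by one instead (two 512-byte
 * sectors per KiB).
 */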

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count)
{
	unsigned long max_discard_bytes;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}
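/*
 * The bitwise test against (discard_granularity - 1) above is the usual
 * power-of-two alignment check, so this store assumes the granularity is
 * a power of two; misaligned values are rejected before the limit is
 * committed.
 */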

/*
 * For zone append queue_max_zone_append_sectors does not just return the
 * underlying queue limits, but actually contains a calculation.  Because of
 * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
 */
static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%llu\n",
		(u64)queue_max_zone_append_sectors(disk->queue) <<
			SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_sectors = max_sectors_kb << 1;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, blk_features_t feature)
{
	struct queue_limits lim;
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	if (val)
		lim.features |= feature;
	else
		lim.features &= ~feature;
	ret = queue_limits_commit_update(disk->queue, &lim);
	if (ret)
		return ret;
	return count;
}
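/*
 * Stores that modify queue limits (max_sectors_kb, discard_max_bytes and
 * the feature flags handled above) all follow the same pattern:
 * queue_limits_start_update() returns a snapshot with the limits lock
 * held, the handler edits the copy, and queue_limits_commit_update()
 * validates and publishes it, or returns an error and leaves the old
 * limits in place.
 */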

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static ssize_t queue_##_name##_store(struct gendisk *disk,		\
		const char *page, size_t count)				\
{									\
	return queue_feature_store(disk, page, count, _feature);	\
}

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL);
QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);

	return ret;
}
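/*
 * nomerges takes 0 (all merging enabled), 1 (disable the extended
 * hash/elevator merge lookups, QUEUE_FLAG_NOXMERGES) or 2 (no merging at
 * all, QUEUE_FLAG_NOMERGES); the show side folds the same two flags back
 * into 0, 1 or 2.
 */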

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
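/*
 * rq_affinity: 0 turns completion steering off, 1 completes a request on
 * a CPU in the same group as the submitter (QUEUE_FLAG_SAME_COMP), and 2
 * forces completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).
 * Without CONFIG_SMP the store body is compiled out and -EINVAL is
 * returned.
 */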

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	if (!(disk->queue->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sprintf(page, "write back\n");
	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
			      size_t count)
{
	struct queue_limits lim;
	bool disable;
	int err;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	lim = queue_limits_start_update(disk->queue);
	if (disable)
		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return count;
}
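/*
 * The strncmp() prefix matches above tolerate the trailing newline that
 * "echo write back > write_cache" produces; "none" is accepted as an
 * alias for "write through".
 */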

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
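/*
 * The entries below bind a sysfs attribute name to its matching
 * queue_<name>_show()/_store() handlers; mode 0444 (QUEUE_RO_ENTRY)
 * versus 0644 (QUEUE_RW_ENTRY) is what makes an attribute read-only or
 * read-write in sysfs.
 */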

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	if (!wbt_rq_qos(disk->queue))
		return -EINVAL;

	if (wbt_disabled(disk->queue))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n",
		div_u64(wbt_get_min_lat(disk->queue), 1000));
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}
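/*
 * wbt_lat_usec is exposed in microseconds but wbt tracks nanoseconds,
 * hence the div_u64(..., 1000) on show and the "val *= 1000ULL" on store;
 * writing -1 restores the device-type default from
 * wbt_default_latency_nsec().
 */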

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&disk->queue->sysfs_lock);
	res = entry->show(disk, page);
	mutex_unlock(&disk->queue->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(disk, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}
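/*
 * Stores are serialized against each other by sysfs_lock and against
 * in-flight I/O by freezing the queue, so ->store() handlers can change
 * queue state without racing request processing.
 */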

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
845