xref: /linux/block/blk-sysfs.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

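/*
 * Helpers shared by most attributes below: print a single decimal value, or
 * parse one from userspace.  queue_var_store() rejects values above UINT_MAX
 * and returns the number of bytes consumed on success, so callers can return
 * its result directly from their ->store() method.
 */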
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

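/*
 * Resizing the request pool is only meaningful for blk-mq queues.  The value
 * is clamped to at least BLKDEV_MIN_RQ before being handed to
 * blk_mq_update_nr_requests().
 */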
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

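/*
 * read_ahead_kb is exposed in KiB but stored in the backing_dev_info as a
 * number of pages, hence the shifts by (PAGE_SHIFT - 10) in both directions.
 */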
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

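/*
 * discard_max_bytes is written in bytes.  The value must be a multiple of the
 * discard granularity, is converted to 512-byte sectors, and is capped by the
 * hardware limit (max_hw_discard_sectors).
 */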
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

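/*
 * max_sectors_kb is specified in KiB (hence the << 1 / >> 1 conversions to
 * and from 512-byte sectors).  Writing 0 drops the user limit and falls back
 * to the smaller of the hardware limit and BLK_DEF_MAX_SECTORS; any other
 * value must lie between one page and max_hw_sectors_kb.
 */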
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

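/*
 * Generate a show/store pair for a boolean attribute backed by a queue flag.
 * When @neg is set the exposed value is the inverse of the flag, e.g.
 * "rotational" is the inverse of QUEUE_FLAG_NONROT.
 */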
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

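/*
 * nomerges: 0 leaves all merging enabled, 1 (NOXMERGES) disables the more
 * expensive merge lookups while keeping the simple one-hit cache attempt,
 * and 2 (NOMERGES) disables request merging entirely.
 */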
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

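/*
 * rq_affinity: 1 (SAME_COMP) completes requests on a CPU in the same group as
 * the submitter, 2 (SAME_FORCE) forces completion onto the exact submitting
 * CPU, and 0 clears both flags.  Only meaningful on SMP builds.
 */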
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

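/*
 * io_poll_delay is written in microseconds, with -1 selecting classic
 * polling; any other non-negative value is converted to nanoseconds and
 * stored in q->poll_nsec for hybrid polling.
 */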
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

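/*
 * wbt_lat_usec is written in microseconds: -1 restores the default latency
 * target, 0 disables writeback throttling, and any other value becomes the
 * new minimum latency target.  The queue is frozen and quiesced around the
 * update because changing the target may enable or disable wbt entirely.
 */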
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

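/*
 * write_cache accepts "write back" to set QUEUE_FLAG_WC and "write through"
 * (or "none") to clear it.  Only the kernel's view of the cache mode is
 * changed here; no command is sent to the device.
 */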
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

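/*
 * Helpers to declare a queue_sysfs_entry: QUEUE_RO_ENTRY creates a read-only
 * (0444) attribute wired to _prefix##_show, while QUEUE_RW_ENTRY additionally
 * hooks up _prefix##_store and makes the attribute writable (0644).
 */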
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

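/*
 * Hide attributes that make no sense for this queue: io_timeout when the
 * driver has no ->timeout handler, and the open/active zone limits on
 * non-zoned devices.
 */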
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (attr == &queue_io_timeout_entry.attr &&
	    (!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

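/*
 * The per-attribute ->show() and ->store() methods above all run with
 * q->sysfs_lock held, which serializes them against each other and against
 * queue registration and teardown.
 */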
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
917