/*
 * linux/block/blk-sysfs.c (revision 6ea76f33e9ab99c7888547e1acba2baf8e4b5b17)
 *
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-wbt.h"

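/*
 * Each attribute file under /sys/block/<disk>/queue is described by one
 * of these entries: ->show formats the current value into the sysfs
 * page buffer, ->store parses a new value from it.  Both callbacks run
 * with q->sysfs_lock held (see queue_attr_show()/queue_attr_store()
 * below).
 */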
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
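
/*
 * queue_var_store() parses a base-10 value and rejects anything that
 * does not fit in an unsigned int; queue_var_store64() also accepts
 * negative values, which queue_wb_lat_store() uses to encode "reset to
 * default" as -1.
 */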

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}
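
/*
 * Usage sketch (the device name is illustrative): the request pool can
 * be resized from user space with
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are rounded up, and queues with neither a
 * ->request_fn nor ->mq_ops reject the write with -EINVAL.
 */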

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}
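
/*
 * read_ahead_kb is stored in units of pages: with 4 KiB pages
 * (PAGE_SHIFT == 12), PAGE_SHIFT - 10 == 2, so writing 512 (KB) stores
 * 128 in ->ra_pages and the show path shifts it back to 512.
 */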

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), page);

	return queue_var_show(PAGE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
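
/*
 * The alignment test above relies on ->discard_granularity being a
 * power of two.  For example, with a 4096-byte granularity (device name
 * illustrative):
 *
 *	echo 1048576 > /sys/block/sda/queue/discard_max_bytes
 *
 * succeeds, while a byte count that is not a multiple of 4096 is
 * rejected with -EINVAL.  The new value is silently capped at the
 * hardware limit, max_hw_discard_sectors.
 */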

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}
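
/*
 * max_sectors_kb must lie between one page and the (possibly
 * device-capped) hardware limit: with 4 KiB pages, writes below 4 fail
 * with -EINVAL, as does anything above max_hw_sectors_kb.  The value is
 * stored back in 512-byte sectors, hence the << 1.
 */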

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
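
/*
 * The macro above expands to queue_{show,store}_{nonrot,random,iostats}.
 * "rotational" is the negated attribute: reading 0 means
 * QUEUE_FLAG_NONROT is set, and writing 0 sets it.
 */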

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
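
/*
 * nomerges accepts 0 (merging enabled), 1 (disable the more expensive
 * extended merge lookups, QUEUE_FLAG_NOXMERGES) or 2 (all merging
 * disabled, QUEUE_FLAG_NOMERGES).
 */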

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
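
/*
 * rq_affinity: 0 disables completion affinity, 1 completes a request in
 * the submitting CPU's group (QUEUE_FLAG_SAME_COMP), 2 forces completion
 * onto the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).  On !CONFIG_SMP
 * builds the store rejects everything with -EINVAL.
 */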

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}
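
/*
 * io_poll_delay: -1 selects pure classic polling (never hybrid sleep),
 * 0 lets the kernel pick an adaptive sleep time from completion
 * statistics, and a positive value sleeps that many microseconds before
 * polling; ->poll_nsec holds the value in nanoseconds.
 */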

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;

		rwb = q->rq_wb;
		if (!rwb)
			return -EINVAL;
	}

	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}
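
/*
 * wbt_lat_usec: -1 restores the default latency target for the device
 * type, otherwise the value is taken as microseconds (0 effectively
 * turns throttling off).  Any explicit write flips enable_state to
 * WBT_STATE_ON_MANUAL so the kernel's default on/off policy no longer
 * overrides the setting.
 */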

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}
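
/*
 * write_cache takes the literal strings "write back" and "write through"
 * (or "none"), e.g. (device name illustrative):
 *
 *	echo "write through" > /sys/block/sda/queue/write_cache
 *
 * This only changes how the block layer treats the queue (whether
 * flushes are issued); it does not reprogram the drive's own cache.
 */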

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
			pre, (unsigned long long) stat->nr_samples,
			(long long) stat->mean, (long long) stat->min,
			(long long) stat->max);
}

static ssize_t queue_stats_show(struct request_queue *q, char *page)
{
	struct blk_rq_stat stat[2];
	ssize_t ret;

	blk_queue_stat_get(q, stat);

	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_stats_entry = {
	.attr = {.name = "stats", .mode = S_IRUGO },
	.show = queue_stats_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_stats_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	NULL,
};
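
/*
 * Everything in default_attrs appears as a file under
 * /sys/block/<disk>/queue once blk_register_queue() adds the "queue"
 * kobject below; entries without a ->store method are read-only
 * (S_IRUGO).
 */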

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
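
/*
 * Both dispatchers serialize against each other through q->sysfs_lock
 * and bail out with -ENOENT once the queue has been marked dying, so an
 * attribute access cannot operate on a queue that is being torn down.
 */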

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	wbt_exit(q);
	bdi_exit(&q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
	if (q->mq_ops)
		return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
	if (q->request_fn)
		return;
#endif

	/*
	 * If this fails, we don't get throttling
	 */
	wbt_init(q);
}
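
/*
 * Writeback throttling is only set up when the matching config option
 * exists for the queue type: CONFIG_BLK_WBT_MQ covers blk-mq queues and
 * CONFIG_BLK_WBT_SQ covers legacy ->request_fn queues.  A failed
 * wbt_init() is deliberately ignored; the queue simply runs without
 * throttling.
 */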

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	blk_wb_init(q);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}
932