xref: /linux/block/blk-sysfs.c (revision 11167b29e53b9a06635309445ead7edfd54e6616)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	void (*load_module)(struct gendisk *disk, const char *page, size_t count);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->queue->nr_requests, page);
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(disk->queue))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
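
/*
 * Example, not part of this file (assuming a disk named "sda"):
 *
 *   $ cat /sys/block/sda/queue/read_ahead_kb
 *   128
 *   # echo 256 > /sys/block/sda/queue/read_ahead_kb
 *
 * The value is stored internally in pages, so the store rounds down to
 * a multiple of PAGE_SIZE / 1024 (4 KB with 4 KB pages).
 */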

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}
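
/*
 * For illustration, QUEUE_SYSFS_LIMIT_SHOW(max_segments) below expands
 * to:
 *
 *   static ssize_t queue_max_segments_show(struct gendisk *disk,
 *                                          char *page)
 *   {
 *           return queue_var_show(disk->queue->limits.max_segments, page);
 *   }
 */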

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}
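
/*
 * The cast to unsigned long long matters: the limits fields are 32-bit
 * sector counts, and "sectors << 9" must not overflow. Shifting by
 * SECTOR_SHIFT (9) converts 512-byte sectors to bytes, e.g. a limit of
 * 8 sectors is reported as 4096.
 */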

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}
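
/*
 * ">> 1" halves the 512-byte sector count, which yields kilobytes:
 * e.g. a max_sectors value of 2560 sectors is reported as 1280.
 */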

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count)
{
	unsigned long max_discard_bytes;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}
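
/*
 * Example, not part of this file (assuming a disk named "sda" with a
 * 4096-byte discard_granularity, which is a power of two as the mask
 * check above expects):
 *
 *   # echo 1048576 > /sys/block/sda/queue/discard_max_bytes   # accepted
 *   # echo 1000 > /sys/block/sda/queue/discard_max_bytes      # -EINVAL
 *
 * 1048576 & 4095 == 0, so it passes the alignment check; 1000 does not.
 */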

static ssize_t
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_sectors = max_sectors_kb << 1;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}
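
/*
 * The mirror image of the _show side: the sysfs value is in kilobytes
 * and the limit in 512-byte sectors, hence "<< 1". E.g. (disk name
 * assumed) "echo 512 > /sys/block/sda/queue/max_sectors_kb" caps user
 * I/O at 1024 sectors; an invalid value is rejected when
 * queue_limits_commit_update() re-validates the limits.
 */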

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, blk_features_t feature)
{
	struct queue_limits lim;
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	if (val)
		lim.features |= feature;
	else
		lim.features &= ~feature;
	ret = queue_limits_commit_update(disk->queue, &lim);
	if (ret)
		return ret;
	return count;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static ssize_t queue_##_name##_store(struct gendisk *disk,		\
		const char *page, size_t count)				\
{									\
	return queue_feature_store(disk, page, count, _feature);	\
}
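
/*
 * For illustration, QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
 * below generates queue_rotational_show(), which emits 0 or 1, and
 * queue_rotational_store(), which forwards to queue_feature_store(); so
 * "echo 0 > /sys/block/sda/queue/rotational" (disk name assumed) clears
 * BLK_FEAT_ROTATIONAL through a limits update.
 */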

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL)
QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA)
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX)

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
					       const char *page, size_t count)
{
	struct queue_limits lim;
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	if (ios)
		lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;

	ret = queue_limits_commit_update(disk->queue, &lim);
	if (ret)
		return ret;

	return count;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);

	return ret;
}
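
/*
 * nomerges is thus a two-bit encoding: 0 enables all merging, 1 sets
 * only NOXMERGES (skip the more expensive extended back/front merge
 * lookups), and 2 sets NOMERGES (disable merging entirely), e.g.
 * "echo 2 > /sys/block/sda/queue/nomerges" (disk name assumed).
 */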

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
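
/*
 * rq_affinity values, matching the branches above: 0 allows completions
 * on any CPU, 1 (SAME_COMP) steers completions to a CPU in the same
 * group as the submitter, and 2 (SAME_FORCE) forces completion on the
 * exact submitting CPU. On !SMP builds the store returns -EINVAL.
 */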

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	if (!(disk->queue->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
			      size_t count)
{
	struct queue_limits lim;
	bool disable;
	int err;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	lim = queue_limits_start_update(disk->queue);
	if (disable)
		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return count;
}
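
/*
 * Example, not part of this file (disk name assumed):
 *
 *   # echo "write through" > /sys/block/sda/queue/write_cache
 *
 * This only sets BLK_FLAG_WRITE_CACHE_DISABLED, i.e. it changes how the
 * block layer treats the cache (whether flushes are issued); it does not
 * reconfigure the device's cache itself.
 */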

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name)		\
static struct queue_sysfs_entry _prefix##_entry = {		\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show		= _prefix##_show,			\
	.load_module	= _prefix##_load_module,		\
	.store		= _prefix##_store,			\
}
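
/*
 * For illustration, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") below
 * expands to:
 *
 *   static struct queue_sysfs_entry queue_ra_entry = {
 *           .attr  = { .name = "read_ahead_kb", .mode = 0644 },
 *           .show  = queue_ra_show,
 *           .store = queue_ra_store,
 *   };
 */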

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	if (!wbt_rq_qos(disk->queue))
		return -EINVAL;

	if (wbt_disabled(disk->queue))
		return sysfs_emit(page, "0\n");

	return sysfs_emit(page, "%llu\n",
		div_u64(wbt_get_min_lat(disk->queue), 1000));
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}
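
/*
 * Semantics of wbt_lat_usec, as implemented above: the value is the
 * write-back throttling latency target in microseconds. Writing -1
 * selects the device-type default via wbt_default_latency_nsec(),
 * writing 0 disables throttling (queue_wb_lat_show() then prints 0),
 * and any positive value is scaled to nanoseconds before being applied
 * under a quiesced queue.
 */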

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&disk->queue->sysfs_lock);
	res = entry->show(disk, page);
	mutex_unlock(&disk->queue->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	/*
	 * If the attribute needs to load a module, do it before freezing the
	 * queue so that the module file can still be read when the request
	 * queue in question belongs to the device that stores the module
	 * file.
	 */
	if (entry->load_module)
		entry->load_module(disk, page, length);

	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(disk, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}
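
/*
 * Example of the load_module path (disk name assumed): writing
 *
 *   # echo mq-deadline > /sys/block/sda/queue/scheduler
 *
 * first invokes elv_iosched_load_module() (wired up through
 * QUEUE_RW_LOAD_MODULE_ENTRY above) while the queue is still unfrozen,
 * so the module can be read even from this very device, and only then
 * freezes the queue and calls elv_iosched_store() to switch elevators.
 */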

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
879