xref: /linux/block/blk-sysfs.c (revision a3d14d1602ca11429d242d230c31af8f822f614f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*show_limit)(struct gendisk *disk, char *page);

	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};
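
/*
 * Each attribute implements either the plain pair (->show/->store, with the
 * attribute doing its own locking) or the limit pair: ->show_limit runs
 * under q->limits_lock and ->store_limit inside a queue_limits_start_update()
 * transaction.  See queue_attr_show() and queue_attr_store() below.
 */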

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
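
/*
 * As with every sysfs ->store helper, queue_var_store() returns the number
 * of bytes consumed on success and a negative errno on failure.  Input is
 * parsed as base 10 and capped at UINT_MAX so the result also fits the
 * 32-bit queue fields it is stored in.
 */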

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->elevator_lock);
	ret = queue_var_show(disk->queue->nr_requests, page);
	mutex_unlock(&disk->queue->elevator_lock);
	return ret;
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		ret = err;
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}
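
/*
 * nr_requests is updated with the queue frozen (no I/O in flight) and under
 * elevator_lock, since the I/O scheduler's tag depth is resized along with
 * it.  Values below BLKDEV_MIN_RQ are rounded up rather than rejected.
 */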

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->limits_lock);
	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
	mutex_unlock(&disk->queue->limits_lock);

	return ret;
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	/*
	 * ->ra_pages is protected by ->limits_lock because it is usually
	 * calculated from the queue limits by queue_limits_commit_update.
	 */
	mutex_lock(&q->limits_lock);
	memflags = blk_mq_freeze_queue(q);
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	mutex_unlock(&q->limits_lock);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
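
/*
 * read_ahead_kb is exposed in KiB but stored as a page count: with 4 KiB
 * pages PAGE_SHIFT is 12, so shifting by (PAGE_SHIFT - 10) == 2 converts
 * between the two, e.g. writing 128 (KiB) stores ra_pages = 128 >> 2 = 32.
 */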

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

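/*
 * As an example, QUEUE_SYSFS_LIMIT_SHOW(max_segments) above expands to:
 *
 *	static ssize_t queue_max_segments_show(struct gendisk *disk,
 *			char *page)
 *	{
 *		return queue_var_show(disk->queue->limits.max_segments, page);
 *	}
 *
 * which is later wired up by QUEUE_LIM_RO_ENTRY(queue_max_segments, ...).
 */
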
#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

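/*
 * Queue limits are kept in 512-byte sectors internally: shifting left by
 * SECTOR_SHIFT (9) converts sectors to bytes, and shifting right by one
 * converts sectors to KiB (1 KiB == 2 sectors).
 */
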
#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}
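
/*
 * discard_max_bytes must be a multiple of discard_granularity (the mask
 * test above assumes the granularity is a power of two) and must still fit
 * in a 32-bit sector count after the byte-to-sector conversion.
 */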

static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}
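
/*
 * ->store_limit callbacks such as the two above only edit the local
 * queue_limits copy handed in by queue_attr_store(); the core then commits
 * the whole update with the queue frozen, so a failed validation never
 * leaves a half-applied change behind.
 */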

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);

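/*
 * From userspace these attributes appear under /sys/block/<disk>/queue/,
 * e.g. (using sda as an example disk):
 *
 *	$ cat /sys/block/sda/queue/rotational
 *	0
 *	# echo 1 > /sys/block/sda/queue/iostats
 *
 * The writable feature flags accept 0/1; fua and dax are read-only.
 */
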
static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));

	return sysfs_emit(page, "%u\n",
			!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static int queue_iostats_passthrough_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	if (ios)
		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
	return 0;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	unsigned int memflags;
	struct request_queue *q = disk->queue;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}

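/*
 * nomerges folds two flags into one value: 0 allows all merging, 1 sets
 * QUEUE_FLAG_NOXMERGES (skip the more expensive extended merge lookups),
 * and 2 sets QUEUE_FLAG_NOMERGES (no merge attempts at all).
 */
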
static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;
	unsigned int memflags;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Here we update two queue flags using atomic bitops. Updating the
	 * pair is not atomic as a whole, but that is harmless: each flag is
	 * read individually with an atomic test_bit(), so no lock is taken
	 * while updating them.
	 */
	memflags = blk_mq_freeze_queue(q);
	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	blk_mq_unfreeze_queue(q, memflags);
#endif
	return ret;
}

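/*
 * rq_affinity: 0 completes requests wherever the completion interrupt
 * lands, 1 (QUEUE_FLAG_SAME_COMP) steers completions to the submitting
 * CPU's group, and 2 (additionally QUEUE_FLAG_SAME_FORCE) forces completion
 * on the exact submitting CPU.  The show side encodes this as set << force.
 */
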
static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	unsigned int memflags;
	ssize_t ret = count;
	struct request_queue *q = disk->queue;

	memflags = blk_mq_freeze_queue(q);
	if (!(q->limits.features & BLK_FEAT_POLL)) {
		ret = -EINVAL;
		goto out;
	}

	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
out:
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val, memflags;
	int err;
	struct request_queue *q = disk->queue;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
	blk_mq_unfreeze_queue(q, memflags);

	return count;
}

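/*
 * io_timeout is exposed in milliseconds while q->rq_timeout is kept in
 * jiffies, hence the msecs_to_jiffies()/jiffies_to_msecs() round trip;
 * zero is rejected as an invalid timeout.
 */
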
static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}

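/*
 * write_cache accepts "write back" to enable use of the device's volatile
 * write cache and "write through" (or "none", accepted for compatibility)
 * to disable it; any other string fails with -EINVAL.
 */
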
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

#define QUEUE_LIM_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0444 },	\
	.show_limit	= _prefix##_show,			\
}

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show_limit	= _prefix##_show,			\
	.store_limit	= _prefix##_store,			\
}

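/*
 * Four entry flavors: QUEUE_RO_ENTRY/QUEUE_RW_ENTRY wire up ->show/->store
 * (modes 0444/0644), while the QUEUE_LIM_* variants wire up ->show_limit/
 * ->store_limit so that the core serializes them against queue limit
 * updates.
 */
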
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr		= {.name = "hw_sector_size", .mode = 0444 },
	.show_limit	= queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	ssize_t ret;
	struct request_queue *q = disk->queue;

	mutex_lock(&disk->rqos_state_mutex);
	if (!wbt_rq_qos(q)) {
		ret = -EINVAL;
		goto out;
	}

	if (wbt_disabled(q)) {
		ret = sysfs_emit(page, "0\n");
		goto out;
	}

	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
out:
	mutex_unlock(&disk->rqos_state_mutex);
	return ret;
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;
	unsigned int memflags;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			goto out;
	}

	ret = count;
	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		goto out;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	mutex_lock(&disk->rqos_state_mutex);
	wbt_set_min_lat(q, val);
	mutex_unlock(&disk->rqos_state_mutex);

	blk_mq_unquiesce_queue(q);
out:
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
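
/*
 * wbt_lat_usec is exposed in microseconds but stored in nanoseconds, hence
 * the multiply by 1000 on store and div_u64(..., 1000) on show.  Writing -1
 * restores the default latency target, and reading 0 means wbt is disabled.
 */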

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	/*
	 * Attributes which are protected with q->limits_lock.
	 */
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_max_write_streams_entry.attr,
	&queue_write_stream_granularity_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	&queue_ra_entry.attr,

	/*
	 * Attributes which don't require locking.
	 */
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_poll_entry.attr,
	&queue_poll_delay_entry.attr,

	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	/*
	 * Attributes which require some form of locking other than
	 * q->sysfs_lock.
	 */
	&elv_iosched_entry.attr,
	&queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	/*
	 * Attributes which don't require locking.
	 */
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,

	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

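/*
 * The ->is_visible callbacks return 0 to hide an attribute: the zone limits
 * disappear on non-zoned devices, the whole mq group disappears for
 * bio-based queues, and io_timeout is hidden when the driver provides no
 * ->timeout handler.
 */
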
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);

	if (!entry->show && !entry->show_limit)
		return -EIO;

	if (entry->show_limit) {
		ssize_t res;

		mutex_lock(&disk->queue->limits_lock);
		res = entry->show_limit(disk, page);
		mutex_unlock(&disk->queue->limits_lock);
		return res;
	}

	return entry->show(disk, page);
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	if (entry->store_limit) {
		ssize_t res;

		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	return entry->store(disk, page, length);
}

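/*
 * The ->store_limit path is a limits transaction: queue_limits_start_update()
 * takes q->limits_lock and returns a local copy, the callback edits that
 * copy, and queue_limits_commit_update_frozen() validates and publishes it
 * with the queue frozen; on error the update is cancelled, which drops the
 * lock.
 */
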
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_unregister_ia_ranges;

	if (queue_is_mq(q))
		elevator_set_default(q);
	wbt_enable_default(disk);

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

	/* Now everything is ready; send out the KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	return ret;
}

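/*
 * Note how the error labels above unwind in reverse order of setup: a
 * blk_crypto sysfs failure unregisters the independent access ranges, which
 * falls through to removing debugfs and the mq sysfs entries before the
 * final kobject_put().
 */
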
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);

	if (queue_is_mq(q))
		elevator_set_none(q);

	blk_debugfs_remove(disk);
}