// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

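/*
 * Handlers come in two flavours: plain ->show/->store handlers do their
 * own locking, while ->show_limit/->store_limit handlers operate on the
 * queue limits. queue_attr_show() calls ->show_limit with q->limits_lock
 * held, and queue_attr_store() calls ->store_limit inside a
 * queue_limits_start_update() / queue_limits_commit_update_frozen()
 * cycle, so those handlers only ever touch the staged copy of the limits.
 */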
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*show_limit)(struct gendisk *disk, char *page);

	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->elevator_lock);
	ret = queue_var_show(disk->queue->nr_requests, page);
	mutex_unlock(&disk->queue->elevator_lock);
	return ret;
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		ret = err;
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}
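
/*
 * Usage sketch (hypothetical device name; the attribute lives under the
 * disk's queue directory in sysfs):
 *
 *	$ cat /sys/block/sda/queue/nr_requests
 *	$ echo 512 > /sys/block/sda/queue/nr_requests
 *
 * The store path above freezes the queue, clamps the value to at least
 * BLKDEV_MIN_RQ and hands it to blk_mq_update_nr_requests(); writes to
 * bio-based (non-mq) queues fail with -EINVAL.
 */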

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->limits_lock);
	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
	mutex_unlock(&disk->queue->limits_lock);

	return ret;
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	/*
	 * ->ra_pages is protected by ->limits_lock because it is usually
	 * calculated from the queue limits by queue_limits_commit_update.
	 */
	mutex_lock(&q->limits_lock);
	memflags = blk_mq_freeze_queue(q);
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	mutex_unlock(&q->limits_lock);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
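
/*
 * Unit conversion above, worked through for the common 4 KiB page case
 * (PAGE_SHIFT == 12, so PAGE_SHIFT - 10 == 2): the show path converts
 * pages to KiB, e.g. 32 pages << 2 = 128 KiB; the store path converts
 * KiB back to pages, e.g. writing "128" gives 128 >> 2 = 32 pages.
 */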

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}
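
/*
 * For reference, QUEUE_SYSFS_LIMIT_SHOW(max_segments) below expands to:
 *
 *	static ssize_t queue_max_segments_show(struct gendisk *disk,
 *			char *page)
 *	{
 *		return queue_var_show(disk->queue->limits.max_segments,
 *				page);
 *	}
 */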

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
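
/*
 * Sector arithmetic used by the two helpers above: a sector is 512 bytes
 * (SECTOR_SHIFT == 9), so "<< SECTOR_SHIFT" turns sectors into bytes
 * (e.g. 2048 sectors -> 1048576 bytes) and ">> 1" turns sectors into KiB
 * (e.g. 2048 sectors -> 1024 KiB), matching the _bytes and _kb suffixes
 * of the corresponding sysfs attribute names.
 */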

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}

static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}
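
/*
 * Worked example for the helper above: writing "512" to max_sectors_kb
 * stages max_user_sectors = 512 << 1 = 1024 sectors (512 KiB); the
 * effective max_sectors limit is then recomputed against this cap when
 * the staged limits are committed.
 */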

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}
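
/*
 * For reference, QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
 * below expands to:
 *
 *	static ssize_t queue_rotational_show(struct gendisk *disk, char *page)
 *	{
 *		return sysfs_emit(page, "%u\n",
 *			!!(disk->queue->limits.features & BLK_FEAT_ROTATIONAL));
 *	}
 *	static int queue_rotational_store(struct gendisk *disk,
 *			const char *page, size_t count, struct queue_limits *lim)
 *	{
 *		return queue_feature_store(disk, page, count, lim,
 *				BLK_FEAT_ROTATIONAL);
 *	}
 */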

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES)

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA)
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX)

static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));

	return sysfs_emit(page, "%u\n",
			!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static int queue_iostats_passthrough_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	if (ios)
		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
	return 0;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	unsigned int memflags;
	struct request_queue *q = disk->queue;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
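
/*
 * nomerges thus encodes three states, read back by the show handler
 * above as (nomerges << 1) | noxmerges:
 *
 *	0 - all merging enabled
 *	1 - only simple one-hit merges (QUEUE_FLAG_NOXMERGES disables the
 *	    more expensive extended merge lookups)
 *	2 - no merging at all (QUEUE_FLAG_NOMERGES)
 */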

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}
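
/*
 * "set << force" folds the two flags back into the value that was
 * written: 0 (off), 1 (QUEUE_FLAG_SAME_COMP: complete on a CPU in the
 * submitter's completion group) or 2 (QUEUE_FLAG_SAME_FORCE: force
 * completion on the exact submitting CPU). E.g. set == 1 and force == 1
 * yields 1 << 1 == 2.
 */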

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;
	unsigned int memflags;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Here we update two queue flags using atomic bitops. Updating the
	 * pair is not atomic as a whole, but that is harmless because the
	 * flags are only ever read individually with atomic test_bit(), so
	 * no lock is taken while updating them.
	 */
	memflags = blk_mq_freeze_queue(q);
	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	blk_mq_unfreeze_queue(q, memflags);
#endif
	return ret;
}

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	unsigned int memflags;
	ssize_t ret = count;
	struct request_queue *q = disk->queue;

	memflags = blk_mq_freeze_queue(q);
	if (!(q->limits.features & BLK_FEAT_POLL)) {
		ret = -EINVAL;
		goto out;
	}

	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
out:
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	unsigned int val, memflags;
	int err;
	struct request_queue *q = disk->queue;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
	blk_mq_unfreeze_queue(q, memflags);

	return count;
}
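
/*
 * io_timeout is exposed in milliseconds but stored in jiffies. Usage
 * sketch (hypothetical device name):
 *
 *	$ echo 30000 > /sys/block/sda/queue/io_timeout
 *
 * sets q->rq_timeout to msecs_to_jiffies(30000), i.e. a 30 second
 * per-request timeout; zero or non-numeric input is rejected with
 * -EINVAL.
 */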

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
}

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
}

#define QUEUE_LIM_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0444 },	\
	.show_limit	= _prefix##_show,			\
}

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show_limit	= _prefix##_show,			\
	.store_limit	= _prefix##_store,			\
}
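
/*
 * For reference, QUEUE_RW_ENTRY(queue_requests, "nr_requests") below
 * expands to:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 *
 * i.e. each invocation pastes its prefix onto handlers defined earlier
 * in this file and names the resulting sysfs attribute.
 */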

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr		= {.name = "hw_sector_size", .mode = 0444 },
	.show_limit	= queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	ssize_t ret;
	struct request_queue *q = disk->queue;

	mutex_lock(&q->elevator_lock);
	if (!wbt_rq_qos(q)) {
		ret = -EINVAL;
		goto out;
	}

	if (wbt_disabled(q)) {
		ret = sysfs_emit(page, "0\n");
		goto out;
	}

	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
out:
	mutex_unlock(&q->elevator_lock);
	return ret;
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;
	unsigned int memflags;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			goto out;
	}

	ret = count;
	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		goto out;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
out:
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
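
/*
 * Unit handling in the store path above: wbt_lat_usec is written in
 * microseconds while writeback throttling tracks nanoseconds, hence
 * "val *= 1000ULL" (e.g. writing "75000" yields 75000000 ns = 75 ms);
 * -1 restores the device-type default via wbt_default_latency_nsec(),
 * and the show path divides by 1000 again.
 */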

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	/*
	 * Attributes which are protected with q->limits_lock.
	 */
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	&queue_ra_entry.attr,

	/*
	 * Attributes which don't require locking.
	 */
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_poll_entry.attr,
	&queue_poll_delay_entry.attr,

	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	/*
	 * Attributes which require some form of locking other than
	 * q->sysfs_lock.
	 */
	&elv_iosched_entry.attr,
	&queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	/*
	 * Attributes which don't require locking.
	 */
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,

	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);

	if (!entry->show && !entry->show_limit)
		return -EIO;

	if (entry->show_limit) {
		ssize_t res;

		mutex_lock(&disk->queue->limits_lock);
		res = entry->show_limit(disk, page);
		mutex_unlock(&disk->queue->limits_lock);
		return res;
	}

	return entry->show(disk, page);
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	if (entry->store_limit) {
		ssize_t res;

		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	return entry->store(disk, page, length);
}
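
/*
 * The store_limit branch above is the staged-update pattern for queue
 * limits: queue_limits_start_update() takes q->limits_lock and returns a
 * copy of the current limits, the handler edits only that copy, and
 * queue_limits_commit_update_frozen() validates and publishes it with
 * the queue frozen, while queue_limits_cancel_update() backs out on
 * error. Readers therefore never observe half-applied limits.
 */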

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_unregister_ia_ranges;

	mutex_lock(&q->elevator_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->elevator_lock);
			goto out_crypto_sysfs_unregister;
		}
	}
	wbt_enable_default(disk);
	mutex_unlock(&q->elevator_lock);

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
	percpu_ref_switch_to_percpu(&q->q_usage_counter);

	return ret;

out_crypto_sysfs_unregister:
	blk_crypto_sysfs_unregister(disk);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->elevator_lock);
	elv_unregister_queue(q);
	mutex_unlock(&q->elevator_lock);

	mutex_lock(&q->sysfs_lock);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);

	blk_debugfs_remove(disk);
}
966