xref: /linux/block/blk-sysfs.c (revision a028739a4330881a6a3b5aa4a39381bbcacf2f2f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to sysfs handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/slab.h>
7 #include <linux/module.h>
8 #include <linux/bio.h>
9 #include <linux/blkdev.h>
10 #include <linux/backing-dev.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/debugfs.h>
13 
14 #include "blk.h"
15 #include "blk-mq.h"
16 #include "blk-mq-debugfs.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-wbt.h"
20 #include "blk-cgroup.h"
21 #include "blk-throttle.h"
22 
23 struct queue_sysfs_entry {
24 	struct attribute attr;
25 	ssize_t (*show)(struct gendisk *disk, char *page);
26 	ssize_t (*show_limit)(struct gendisk *disk, char *page);
27 
28 	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
29 	int (*store_limit)(struct gendisk *disk, const char *page,
30 			size_t count, struct queue_limits *lim);
31 };
32 
33 static ssize_t
34 queue_var_show(unsigned long var, char *page)
35 {
36 	return sysfs_emit(page, "%lu\n", var);
37 }
38 
39 static ssize_t
40 queue_var_store(unsigned long *var, const char *page, size_t count)
41 {
42 	int err;
43 	unsigned long v;
44 
45 	err = kstrtoul(page, 10, &v);
46 	if (err || v > UINT_MAX)
47 		return -EINVAL;
48 
49 	*var = v;
50 
51 	return count;
52 }
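
/*
 * Editor's sketch (not part of the kernel source): a sysfs write such as
 *
 *   echo 128 > /sys/block/sda/queue/nr_requests   # hypothetical disk name
 *
 * arrives here with page == "128\n"; kstrtoul() tolerates the trailing
 * newline, and anything above UINT_MAX is rejected with -EINVAL.
 */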
53 
54 static ssize_t queue_requests_show(struct gendisk *disk, char *page)
55 {
56 	ssize_t ret;
57 
58 	mutex_lock(&disk->queue->elevator_lock);
59 	ret = queue_var_show(disk->queue->nr_requests, page);
60 	mutex_unlock(&disk->queue->elevator_lock);
61 	return ret;
62 }
63 
64 static ssize_t
65 queue_requests_store(struct gendisk *disk, const char *page, size_t count)
66 {
67 	struct request_queue *q = disk->queue;
68 	struct blk_mq_tag_set *set = q->tag_set;
69 	struct elevator_tags *et = NULL;
70 	unsigned int memflags;
71 	unsigned long nr;
72 	int ret;
73 
74 	ret = queue_var_store(&nr, page, count);
75 	if (ret < 0)
76 		return ret;
77 
78 	/*
79 	 * Serialize nr_requests updates against concurrent
80 	 * queue_requests_store() calls and elevator switches.
81 	 *
82 	 * Use trylock to avoid circular lock dependency with kernfs active
83 	 * reference during concurrent disk deletion:
84 	 *   update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
85 	 *   kn->active -> update_nr_hwq_lock (via this sysfs write path)
86 	 */
87 	if (!down_write_trylock(&set->update_nr_hwq_lock))
88 		return -EBUSY;
89 
90 	if (nr == q->nr_requests)
91 		goto unlock;
92 
93 	if (nr < BLKDEV_MIN_RQ)
94 		nr = BLKDEV_MIN_RQ;
95 
96 	/*
97 	 * Switching elevator is protected by update_nr_hwq_lock:
98 	 *  - the read lock is held by the elevator sysfs attribute;
99 	 *  - the write lock is held while updating nr_hw_queues;
100 	 * Hence it is safe to access q->elevator here with the write lock held.
101 	 */
102 	if (nr <= set->reserved_tags ||
103 	    (q->elevator && nr > MAX_SCHED_RQ) ||
104 	    (!q->elevator && nr > set->queue_depth)) {
105 		ret = -EINVAL;
106 		goto unlock;
107 	}
108 
109 	if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
110 	    nr > q->elevator->et->nr_requests) {
111 		/*
112 		 * Tags will grow; allocate memory before freezing the queue
113 		 * so that the allocation cannot deadlock against the frozen
114 		 * queue.
114 		 */
115 		et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
116 		if (!et) {
117 			ret = -ENOMEM;
118 			goto unlock;
119 		}
120 	}
121 
122 	memflags = blk_mq_freeze_queue(q);
123 	mutex_lock(&q->elevator_lock);
124 	et = blk_mq_update_nr_requests(q, et, nr);
125 	mutex_unlock(&q->elevator_lock);
126 	blk_mq_unfreeze_queue(q, memflags);
127 
128 	if (et)
129 		blk_mq_free_sched_tags(et, set);
130 
131 unlock:
132 	up_write(&set->update_nr_hwq_lock);
133 	return ret;
134 }
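
/*
 * Editor's note, summarizing the store path above: the effective ordering is
 *
 *   down_write_trylock(&set->update_nr_hwq_lock)
 *     -> blk_mq_freeze_queue()
 *       -> mutex_lock(&q->elevator_lock)
 *         -> blk_mq_update_nr_requests()
 *
 * with any grown scheduler tags allocated before the freeze, and any stale
 * tags freed only after the queue has been unfrozen.
 */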
135 
136 static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
137 {
138 	guard(mutex)(&disk->queue->elevator_lock);
139 
140 	return queue_var_show(disk->queue->async_depth, page);
141 }
142 
143 static ssize_t
144 queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
145 {
146 	struct request_queue *q = disk->queue;
147 	unsigned int memflags;
148 	unsigned long nr;
149 	int ret;
150 
151 	if (!queue_is_mq(q))
152 		return -EINVAL;
153 
154 	ret = queue_var_store(&nr, page, count);
155 	if (ret < 0)
156 		return ret;
157 
158 	if (nr == 0)
159 		return -EINVAL;
160 
161 	memflags = blk_mq_freeze_queue(q);
162 	scoped_guard(mutex, &q->elevator_lock) {
163 		if (q->elevator) {
164 			q->async_depth = min(q->nr_requests, nr);
165 			if (q->elevator->type->ops.depth_updated)
166 				q->elevator->type->ops.depth_updated(q);
167 		} else {
168 			ret = -EINVAL;
169 		}
170 	}
171 	blk_mq_unfreeze_queue(q, memflags);
172 
173 	return ret;
174 }
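
/*
 * A note on the store above: the new depth is clamped to nr_requests, and
 * an elevator implementing ->depth_updated() is notified while the queue
 * is frozen; writing async_depth with no elevator attached fails with
 * -EINVAL.
 */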
175 
176 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
177 {
178 	ssize_t ret;
179 
180 	mutex_lock(&disk->queue->limits_lock);
181 	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
182 	mutex_unlock(&disk->queue->limits_lock);
183 
184 	return ret;
185 }
186 
187 static ssize_t
188 queue_ra_store(struct gendisk *disk, const char *page, size_t count)
189 {
190 	unsigned long ra_kb;
191 	ssize_t ret;
192 	struct request_queue *q = disk->queue;
193 
194 	ret = queue_var_store(&ra_kb, page, count);
195 	if (ret < 0)
196 		return ret;
197 	/*
198 	 * The ->ra_pages change below is protected by ->limits_lock because it
199 	 * is usually calculated from the queue limits by
200 	 * queue_limits_commit_update().
201 	 *
202 	 * bdi->ra_pages reads are not serialized against bdi->ra_pages writes.
203 	 * Use WRITE_ONCE() so readers observe a single, tear-free store.
204 	 */
205 	mutex_lock(&q->limits_lock);
206 	WRITE_ONCE(disk->bdi->ra_pages, ra_kb >> (PAGE_SHIFT - 10));
207 	mutex_unlock(&q->limits_lock);
208 
209 	return ret;
210 }
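
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT - 10 == 2): writing
 * "512" to read_ahead_kb stores 512 >> 2 = 128 pages in bdi->ra_pages,
 * and queue_ra_show() reports it back as 128 << 2 = 512.
 */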
211 
212 #define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
213 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
214 {									\
215 	return queue_var_show(disk->queue->limits._field, page);	\
216 }
217 
218 QUEUE_SYSFS_LIMIT_SHOW(max_segments)
219 QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
220 QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
221 QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
222 QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
223 QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
224 QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
225 QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
226 QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
227 QUEUE_SYSFS_LIMIT_SHOW(io_min)
228 QUEUE_SYSFS_LIMIT_SHOW(io_opt)
229 QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
230 QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
231 QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
232 QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
233 QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
234 QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
235 QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
236 QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
237 
238 #define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
239 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
240 {									\
241 	return sysfs_emit(page, "%llu\n",				\
242 		(unsigned long long)disk->queue->limits._field <<	\
243 			SECTOR_SHIFT);					\
244 }
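
/*
 * For reference, QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
 * expands to roughly:
 *
 *   static ssize_t queue_max_discard_sectors_show(struct gendisk *disk,
 *                                                 char *page)
 *   {
 *           return sysfs_emit(page, "%llu\n",
 *                   (unsigned long long)disk->queue->limits.max_discard_sectors
 *                           << SECTOR_SHIFT);
 *   }
 *
 * i.e. a 512-byte-sector count is reported to userspace in bytes.
 */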
245 
246 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
247 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
248 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
249 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_wzeroes_unmap_sectors)
250 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_wzeroes_unmap_sectors)
251 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
252 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
253 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)
254 
255 #define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
256 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
257 {									\
258 	return queue_var_show(disk->queue->limits._field >> 1, page);	\
259 }
260 
261 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
262 QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
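
/*
 * A sector is 512 bytes, so "_field >> 1" converts sectors to KiB: e.g.
 * max_sectors == 2560 sectors is shown as 1280 in max_sectors_kb.
 */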
263 
264 #define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
265 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
266 {									\
267 	return sysfs_emit(page, "%d\n", _val);				\
268 }
269 
270 /* deprecated fields */
271 QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
272 QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
273 QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
274 
275 static int queue_max_discard_sectors_store(struct gendisk *disk,
276 		const char *page, size_t count, struct queue_limits *lim)
277 {
278 	unsigned long max_discard_bytes;
279 	ssize_t ret;
280 
281 	ret = queue_var_store(&max_discard_bytes, page, count);
282 	if (ret < 0)
283 		return ret;
284 
285 	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
286 		return -EINVAL;
287 
288 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
289 		return -EINVAL;
290 
291 	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
292 	return 0;
293 }
294 
295 static int queue_max_wzeroes_unmap_sectors_store(struct gendisk *disk,
296 		const char *page, size_t count, struct queue_limits *lim)
297 {
298 	unsigned long max_zeroes_bytes, max_hw_zeroes_bytes;
299 	ssize_t ret;
300 
301 	ret = queue_var_store(&max_zeroes_bytes, page, count);
302 	if (ret < 0)
303 		return ret;
304 
305 	max_hw_zeroes_bytes = lim->max_hw_wzeroes_unmap_sectors << SECTOR_SHIFT;
306 	if (max_zeroes_bytes != 0 && max_zeroes_bytes != max_hw_zeroes_bytes)
307 		return -EINVAL;
308 
309 	lim->max_user_wzeroes_unmap_sectors = max_zeroes_bytes >> SECTOR_SHIFT;
310 	return 0;
311 }
312 
313 static int
314 queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
315 		struct queue_limits *lim)
316 {
317 	unsigned long max_sectors_kb;
318 	ssize_t ret;
319 
320 	ret = queue_var_store(&max_sectors_kb, page, count);
321 	if (ret < 0)
322 		return ret;
323 
324 	lim->max_user_sectors = max_sectors_kb << 1;
325 	return 0;
326 }
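
/*
 * The inverse of the _SECTORS_TO_KB show helpers: "max_sectors_kb << 1"
 * converts the KiB value written by userspace back to 512-byte sectors,
 * so e.g. writing 1280 caps max_user_sectors at 2560 sectors.
 */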
327 
328 static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
329 		size_t count, struct queue_limits *lim, blk_features_t feature)
330 {
331 	unsigned long val;
332 	ssize_t ret;
333 
334 	ret = queue_var_store(&val, page, count);
335 	if (ret < 0)
336 		return ret;
337 
338 	if (val)
339 		lim->features |= feature;
340 	else
341 		lim->features &= ~feature;
342 	return 0;
343 }
344 
345 #define QUEUE_SYSFS_FEATURE(_name, _feature)				\
346 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
347 {									\
348 	return sysfs_emit(page, "%u\n",					\
349 		!!(disk->queue->limits.features & _feature));		\
350 }									\
351 static int queue_##_name##_store(struct gendisk *disk,			\
352 		const char *page, size_t count, struct queue_limits *lim) \
353 {									\
354 	return queue_feature_store(disk, page, count, lim, _feature);	\
355 }
356 
357 QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
358 QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
359 QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
360 QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);
361 
362 #define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
363 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
364 {									\
365 	return sysfs_emit(page, "%u\n",					\
366 		!!(disk->queue->limits.features & _feature));		\
367 }
368 
369 QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
370 QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
371 
372 static ssize_t queue_poll_show(struct gendisk *disk, char *page)
373 {
374 	if (queue_is_mq(disk->queue))
375 		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
376 
377 	return sysfs_emit(page, "%u\n",
378 			!!(disk->queue->limits.features & BLK_FEAT_POLL));
379 }
380 
381 static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
382 {
383 	if (blk_queue_is_zoned(disk->queue))
384 		return sysfs_emit(page, "host-managed\n");
385 	return sysfs_emit(page, "none\n");
386 }
387 
388 static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
389 {
390 	return queue_var_show(disk_nr_zones(disk), page);
391 }
392 
393 static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
394 {
395 	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
396 }
397 
398 static int queue_iostats_passthrough_store(struct gendisk *disk,
399 		const char *page, size_t count, struct queue_limits *lim)
400 {
401 	unsigned long ios;
402 	ssize_t ret;
403 
404 	ret = queue_var_store(&ios, page, count);
405 	if (ret < 0)
406 		return ret;
407 
408 	if (ios)
409 		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
410 	else
411 		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
412 	return 0;
413 }
414 
415 static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
416 {
417 	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
418 			       blk_queue_noxmerges(disk->queue), page);
419 }
420 
421 static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
422 				    size_t count)
423 {
424 	unsigned long nm;
425 	struct request_queue *q = disk->queue;
426 	ssize_t ret = queue_var_store(&nm, page, count);
427 
428 	if (ret < 0)
429 		return ret;
430 
431 	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
432 	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
433 	if (nm == 2)
434 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
435 	else if (nm)
436 		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
437 
438 	return ret;
439 }
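
/*
 * nomerges values, mirrored by queue_nomerges_show():
 *   0 - all merging enabled (both flags clear)
 *   1 - only simple one-hit cache merges (QUEUE_FLAG_NOXMERGES)
 *   2 - all merging disabled (QUEUE_FLAG_NOMERGES)
 */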
440 
441 static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
442 {
443 	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
444 	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);
445 
446 	return queue_var_show(set << force, page);
447 }
448 
449 static ssize_t
450 queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
451 {
452 	ssize_t ret = -EINVAL;
453 #ifdef CONFIG_SMP
454 	struct request_queue *q = disk->queue;
455 	unsigned long val;
456 
457 	ret = queue_var_store(&val, page, count);
458 	if (ret < 0)
459 		return ret;
460 
461 	/*
462 	 * Two queue flags are updated here, each with an atomic bitop. The
463 	 * pair of updates is not atomic as a whole, but that is harmless:
464 	 * the flags are only ever read individually with the atomic
465 	 * test_bit() operation, so no lock is taken while updating them.
466 	 */
467 	if (val == 2) {
468 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
469 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
470 	} else if (val == 1) {
471 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
472 		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
473 	} else if (val == 0) {
474 		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
475 		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
476 	}
477 #endif
478 	return ret;
479 }
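
/*
 * rq_affinity values, encoded by queue_rq_affinity_show() as "set << force":
 *   0 - complete on any CPU (SAME_COMP clear)
 *   1 - complete on a CPU in the submitter's group (SAME_COMP)
 *   2 - force completion on the submitting CPU (SAME_COMP + SAME_FORCE)
 */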
480 
481 static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
482 				size_t count)
483 {
484 	return count;
485 }
486 
487 static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
488 				size_t count)
489 {
490 	ssize_t ret = count;
491 	struct request_queue *q = disk->queue;
492 
493 	if (!(q->limits.features & BLK_FEAT_POLL)) {
494 		ret = -EINVAL;
495 		goto out;
496 	}
497 
498 	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
499 	pr_info_ratelimited("please use driver specific parameters instead.\n");
500 out:
501 	return ret;
502 }
503 
504 static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
505 {
506 	return sysfs_emit(page, "%u\n",
507 			jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
508 }
509 
510 static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
511 				  size_t count)
512 {
513 	unsigned int val;
514 	int err;
515 	struct request_queue *q = disk->queue;
516 
517 	err = kstrtou32(page, 10, &val);
518 	if (err || val == 0)
519 		return -EINVAL;
520 
521 	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
522 
523 	return count;
524 }
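
/*
 * io_timeout is exposed in milliseconds but stored in jiffies; e.g.
 * writing "30000" arms a 30 s request timeout via msecs_to_jiffies().
 * A value of zero is rejected.
 */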
525 
526 static ssize_t queue_wc_show(struct gendisk *disk, char *page)
527 {
528 	if (blk_queue_write_cache(disk->queue))
529 		return sysfs_emit(page, "write back\n");
530 	return sysfs_emit(page, "write through\n");
531 }
532 
533 static int queue_wc_store(struct gendisk *disk, const char *page,
534 		size_t count, struct queue_limits *lim)
535 {
536 	bool disable;
537 
538 	if (!strncmp(page, "write back", 10)) {
539 		disable = false;
540 	} else if (!strncmp(page, "write through", 13) ||
541 		   !strncmp(page, "none", 4)) {
542 		disable = true;
543 	} else {
544 		return -EINVAL;
545 	}
546 
547 	if (disable)
548 		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
549 	else
550 		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
551 	return 0;
552 }
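
/*
 * Accepted write_cache values: "write back" enables the write cache;
 * "write through" or the legacy "none" disable it by setting
 * BLK_FLAG_WRITE_CACHE_DISABLED in the queue limits.
 */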
553 
554 #define QUEUE_RO_ENTRY(_prefix, _name)			\
555 static struct queue_sysfs_entry _prefix##_entry = {	\
556 	.attr	= { .name = _name, .mode = 0444 },	\
557 	.show	= _prefix##_show,			\
558 };
559 
560 #define QUEUE_RW_ENTRY(_prefix, _name)			\
561 static struct queue_sysfs_entry _prefix##_entry = {	\
562 	.attr	= { .name = _name, .mode = 0644 },	\
563 	.show	= _prefix##_show,			\
564 	.store	= _prefix##_store,			\
565 };
566 
567 #define QUEUE_LIM_RO_ENTRY(_prefix, _name)			\
568 static struct queue_sysfs_entry _prefix##_entry = {	\
569 	.attr		= { .name = _name, .mode = 0444 },	\
570 	.show_limit	= _prefix##_show,			\
571 }
572 
573 #define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
574 static struct queue_sysfs_entry _prefix##_entry = {	\
575 	.attr		= { .name = _name, .mode = 0644 },	\
576 	.show_limit	= _prefix##_show,			\
577 	.store_limit	= _prefix##_store,			\
578 }
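
/*
 * Example expansion: QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") defines
 *
 *   static struct queue_sysfs_entry queue_ra_entry = {
 *           .attr  = { .name = "read_ahead_kb", .mode = 0644 },
 *           .show  = queue_ra_show,
 *           .store = queue_ra_store,
 *   };
 *
 * so each invocation below wires one sysfs file to its handler(s).
 */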
579 
580 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
581 QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
582 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
583 QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
584 QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
585 QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
586 QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
587 QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
588 QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
589 QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
590 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
591 
592 QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
593 QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
594 QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
595 QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
596 QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");
597 
598 QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
599 QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
600 QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
601 QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
602 QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
603 
604 QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
605 QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
606 		"atomic_write_boundary_bytes");
607 QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
608 QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
609 
610 QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
611 QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
612 QUEUE_LIM_RO_ENTRY(queue_max_hw_wzeroes_unmap_sectors,
613 		"write_zeroes_unmap_max_hw_bytes");
614 QUEUE_LIM_RW_ENTRY(queue_max_wzeroes_unmap_sectors,
615 		"write_zeroes_unmap_max_bytes");
616 QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
617 QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
618 
619 QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
620 QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
621 QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
622 QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");
623 
624 QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
625 QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
626 QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
627 QUEUE_RW_ENTRY(queue_poll, "io_poll");
628 QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
629 QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
630 QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
631 QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
632 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
633 QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
634 QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");
635 
636 /* legacy alias for logical_block_size: */
637 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
638 	.attr		= {.name = "hw_sector_size", .mode = 0444 },
639 	.show_limit	= queue_logical_block_size_show,
640 };
641 
642 QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
643 QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
644 QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
645 QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");
646 
647 #ifdef CONFIG_BLK_WBT
648 static ssize_t queue_var_store64(s64 *var, const char *page)
649 {
650 	int err;
651 	s64 v;
652 
653 	err = kstrtos64(page, 10, &v);
654 	if (err < 0)
655 		return err;
656 
657 	*var = v;
658 	return 0;
659 }
660 
661 static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
662 {
663 	ssize_t ret;
664 	struct request_queue *q = disk->queue;
665 
666 	mutex_lock(&disk->rqos_state_mutex);
667 	if (!wbt_rq_qos(q)) {
668 		ret = -EINVAL;
669 		goto out;
670 	}
671 
672 	if (wbt_disabled(q)) {
673 		ret = sysfs_emit(page, "0\n");
674 		goto out;
675 	}
676 
677 	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
678 out:
679 	mutex_unlock(&disk->rqos_state_mutex);
680 	return ret;
681 }
682 
683 static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
684 				  size_t count)
685 {
686 	ssize_t ret;
687 	s64 val;
688 
689 	ret = queue_var_store64(&val, page);
690 	if (ret < 0)
691 		return ret;
692 	if (val < -1)
693 		return -EINVAL;
694 
695 	ret = wbt_set_lat(disk, val);
696 	return ret ? ret : count;
697 }
698 
699 QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
700 #endif
701 
702 /* Common attributes for bio-based and request-based queues. */
703 static struct attribute *queue_attrs[] = {
704 	/*
705 	 * Attributes which are protected with q->limits_lock.
706 	 */
707 	&queue_max_hw_sectors_entry.attr,
708 	&queue_max_sectors_entry.attr,
709 	&queue_max_segments_entry.attr,
710 	&queue_max_discard_segments_entry.attr,
711 	&queue_max_integrity_segments_entry.attr,
712 	&queue_max_segment_size_entry.attr,
713 	&queue_max_write_streams_entry.attr,
714 	&queue_write_stream_granularity_entry.attr,
715 	&queue_hw_sector_size_entry.attr,
716 	&queue_logical_block_size_entry.attr,
717 	&queue_physical_block_size_entry.attr,
718 	&queue_chunk_sectors_entry.attr,
719 	&queue_io_min_entry.attr,
720 	&queue_io_opt_entry.attr,
721 	&queue_discard_granularity_entry.attr,
722 	&queue_max_discard_sectors_entry.attr,
723 	&queue_max_hw_discard_sectors_entry.attr,
724 	&queue_atomic_write_max_sectors_entry.attr,
725 	&queue_atomic_write_boundary_sectors_entry.attr,
726 	&queue_atomic_write_unit_min_entry.attr,
727 	&queue_atomic_write_unit_max_entry.attr,
728 	&queue_max_write_zeroes_sectors_entry.attr,
729 	&queue_max_hw_wzeroes_unmap_sectors_entry.attr,
730 	&queue_max_wzeroes_unmap_sectors_entry.attr,
731 	&queue_max_zone_append_sectors_entry.attr,
732 	&queue_zone_write_granularity_entry.attr,
733 	&queue_rotational_entry.attr,
734 	&queue_zoned_entry.attr,
735 	&queue_max_open_zones_entry.attr,
736 	&queue_max_active_zones_entry.attr,
737 	&queue_iostats_passthrough_entry.attr,
738 	&queue_iostats_entry.attr,
739 	&queue_stable_writes_entry.attr,
740 	&queue_add_random_entry.attr,
741 	&queue_wc_entry.attr,
742 	&queue_fua_entry.attr,
743 	&queue_dax_entry.attr,
744 	&queue_virt_boundary_mask_entry.attr,
745 	&queue_dma_alignment_entry.attr,
746 	&queue_ra_entry.attr,
747 
748 	/*
749 	 * Attributes which don't require locking.
750 	 */
751 	&queue_discard_zeroes_data_entry.attr,
752 	&queue_write_same_max_entry.attr,
753 	&queue_nr_zones_entry.attr,
754 	&queue_nomerges_entry.attr,
755 	&queue_poll_entry.attr,
756 	&queue_poll_delay_entry.attr,
757 
758 	NULL,
759 };
760 
761 /* Request-based queue attributes that are not relevant for bio-based queues. */
762 static struct attribute *blk_mq_queue_attrs[] = {
763 	/*
764 	 * Attributes which require some form of locking other than
765 	 * q->sysfs_lock.
766 	 */
767 	&elv_iosched_entry.attr,
768 	&queue_requests_entry.attr,
769 	&queue_async_depth_entry.attr,
770 #ifdef CONFIG_BLK_WBT
771 	&queue_wb_lat_entry.attr,
772 #endif
773 	/*
774 	 * Attributes which don't require locking.
775 	 */
776 	&queue_rq_affinity_entry.attr,
777 	&queue_io_timeout_entry.attr,
778 
779 	NULL,
780 };
781 
782 static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
783 				int n)
784 {
785 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
786 	struct request_queue *q = disk->queue;
787 
788 	if ((attr == &queue_max_open_zones_entry.attr ||
789 	     attr == &queue_max_active_zones_entry.attr) &&
790 	    !blk_queue_is_zoned(q))
791 		return 0;
792 
793 	return attr->mode;
794 }
795 
796 static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
797 					 struct attribute *attr, int n)
798 {
799 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
800 	struct request_queue *q = disk->queue;
801 
802 	if (!queue_is_mq(q))
803 		return 0;
804 
805 	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
806 		return 0;
807 
808 	return attr->mode;
809 }
810 
811 static struct attribute_group queue_attr_group = {
812 	.attrs = queue_attrs,
813 	.is_visible = queue_attr_visible,
814 };
815 
816 static struct attribute_group blk_mq_queue_attr_group = {
817 	.attrs = blk_mq_queue_attrs,
818 	.is_visible = blk_mq_queue_attr_visible,
819 };
820 
821 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
822 
823 static ssize_t
824 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
825 {
826 	struct queue_sysfs_entry *entry = to_queue(attr);
827 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
828 
829 	if (!entry->show && !entry->show_limit)
830 		return -EIO;
831 
832 	if (entry->show_limit) {
833 		ssize_t res;
834 
835 		mutex_lock(&disk->queue->limits_lock);
836 		res = entry->show_limit(disk, page);
837 		mutex_unlock(&disk->queue->limits_lock);
838 		return res;
839 	}
840 
841 	return entry->show(disk, page);
842 }
843 
844 static ssize_t
845 queue_attr_store(struct kobject *kobj, struct attribute *attr,
846 		    const char *page, size_t length)
847 {
848 	struct queue_sysfs_entry *entry = to_queue(attr);
849 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
850 	struct request_queue *q = disk->queue;
851 
852 	if (!entry->store_limit && !entry->store)
853 		return -EIO;
854 
855 	if (entry->store_limit) {
856 		ssize_t res;
857 
858 		struct queue_limits lim = queue_limits_start_update(q);
859 
860 		res = entry->store_limit(disk, page, length, &lim);
861 		if (res < 0) {
862 			queue_limits_cancel_update(q);
863 			return res;
864 		}
865 
866 		res = queue_limits_commit_update_frozen(q, &lim);
867 		if (res)
868 			return res;
869 		return length;
870 	}
871 
872 	return entry->store(disk, page, length);
873 }
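
/*
 * Editor's sketch of the ->store_limit protocol implemented above:
 * queue_limits_start_update() takes q->limits_lock and snapshots the
 * current limits; the attribute mutates the copy; the write then either
 * rolls back with queue_limits_cancel_update() on error or applies the
 * copy atomically with queue_limits_commit_update_frozen(), which freezes
 * the queue around the commit.
 */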
874 
875 static const struct sysfs_ops queue_sysfs_ops = {
876 	.show	= queue_attr_show,
877 	.store	= queue_attr_store,
878 };
879 
880 static const struct attribute_group *blk_queue_attr_groups[] = {
881 	&queue_attr_group,
882 	&blk_mq_queue_attr_group,
883 	NULL
884 };
885 
886 static void blk_queue_release(struct kobject *kobj)
887 {
888 	/* nothing to do here; all data is associated with the parent gendisk */
889 }
890 
891 const struct kobj_type blk_queue_ktype = {
892 	.default_groups = blk_queue_attr_groups,
893 	.sysfs_ops	= &queue_sysfs_ops,
894 	.release	= blk_queue_release,
895 };
896 
897 static void blk_debugfs_remove(struct gendisk *disk)
898 {
899 	struct request_queue *q = disk->queue;
900 
901 	blk_debugfs_lock_nomemsave(q);
902 	blk_trace_shutdown(q);
903 	debugfs_remove_recursive(q->debugfs_dir);
904 	q->debugfs_dir = NULL;
905 	q->sched_debugfs_dir = NULL;
906 	q->rqos_debugfs_dir = NULL;
907 	blk_debugfs_unlock_nomemrestore(q);
908 }
909 
910 /**
911  * blk_register_queue - register a block layer queue with sysfs
912  * @disk: Disk of which the request queue should be registered with sysfs.
913  */
914 int blk_register_queue(struct gendisk *disk)
915 {
916 	struct request_queue *q = disk->queue;
917 	unsigned int memflags;
918 	int ret;
919 
920 	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
921 	if (ret < 0)
922 		return ret;
923 
924 	if (queue_is_mq(q)) {
925 		ret = blk_mq_sysfs_register(disk);
926 		if (ret)
927 			goto out_del_queue_kobj;
928 	}
929 	mutex_lock(&q->sysfs_lock);
930 
931 	memflags = blk_debugfs_lock(q);
932 	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
933 	if (queue_is_mq(q))
934 		blk_mq_debugfs_register(q);
935 	blk_debugfs_unlock(q, memflags);
936 
937 	ret = disk_register_independent_access_ranges(disk);
938 	if (ret)
939 		goto out_debugfs_remove;
940 
941 	ret = blk_crypto_sysfs_register(disk);
942 	if (ret)
943 		goto out_unregister_ia_ranges;
944 
945 	if (queue_is_mq(q))
946 		elevator_set_default(q);
947 
948 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
949 	wbt_init_enable_default(disk);
950 
951 	/* Now everything is ready and send out KOBJ_ADD uevent */
952 	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
953 	if (q->elevator)
954 		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
955 	mutex_unlock(&q->sysfs_lock);
956 
957 	/*
958 	 * SCSI probing may synchronously create and destroy a lot of
959 	 * request_queues for non-existent devices.  Shutting down a fully
960 	 * functional queue takes measurable wallclock time as RCU grace
961 	 * periods are involved.  To avoid excessive latency in these
962 	 * cases, a request_queue starts out in a degraded mode which is
963 	 * faster to shut down and is made fully functional here as
964 	 * request_queues for non-existent devices never get registered.
965 	 */
966 	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
967 	percpu_ref_switch_to_percpu(&q->q_usage_counter);
968 
969 	return ret;
970 
971 out_unregister_ia_ranges:
972 	disk_unregister_independent_access_ranges(disk);
973 out_debugfs_remove:
974 	blk_debugfs_remove(disk);
975 	mutex_unlock(&q->sysfs_lock);
976 	if (queue_is_mq(q))
977 		blk_mq_sysfs_unregister(disk);
978 out_del_queue_kobj:
979 	kobject_del(&disk->queue_kobj);
980 	return ret;
981 }
982 
983 /**
984  * blk_unregister_queue - counterpart of blk_register_queue()
985  * @disk: Disk of which the request queue should be unregistered from sysfs.
986  *
987  * Note: the caller is responsible for guaranteeing that this function is called
988  * after blk_register_queue() has finished.
989  */
990 void blk_unregister_queue(struct gendisk *disk)
991 {
992 	struct request_queue *q = disk->queue;
993 
994 	if (WARN_ON(!q))
995 		return;
996 
997 	/* Return early if disk->queue was never registered. */
998 	if (!blk_queue_registered(q))
999 		return;
1000 
1001 	/*
1002 	 * Since sysfs_remove_dir() prevents adding new directory entries
1003 	 * before removal of existing entries starts, protect against
1004 	 * concurrent elv_iosched_store() calls.
1005 	 */
1006 	mutex_lock(&q->sysfs_lock);
1007 	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
1008 	mutex_unlock(&q->sysfs_lock);
1009 
1010 	/*
1011 	 * Remove the sysfs attributes before unregistering the queue data
1012 	 * structures that can be modified through sysfs.
1013 	 */
1014 	if (queue_is_mq(q))
1015 		blk_mq_sysfs_unregister(disk);
1016 	blk_crypto_sysfs_unregister(disk);
1017 
1018 	mutex_lock(&q->sysfs_lock);
1019 	disk_unregister_independent_access_ranges(disk);
1020 	mutex_unlock(&q->sysfs_lock);
1021 
1022 	/* Now that we've deleted all child objects, we can delete the queue. */
1023 	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
1024 	kobject_del(&disk->queue_kobj);
1025 
1026 	if (queue_is_mq(q))
1027 		elevator_set_none(q);
1028 
1029 	blk_debugfs_remove(disk);
1030 }
1031