Lines matching "+full:page +full:- +full:based" (block/blk-sysfs.c)

1 // SPDX-License-Identifier: GPL-2.0
10 #include <linux/backing-dev.h>
15 #include "blk-mq.h"
16 #include "blk-mq-debugfs.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-wbt.h"
20 #include "blk-cgroup.h"
21 #include "blk-throttle.h"
25 ssize_t (*show)(struct gendisk *disk, char *page);
26 ssize_t (*show_limit)(struct gendisk *disk, char *page);
28 ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
29 int (*store_limit)(struct gendisk *disk, const char *page,
34 queue_var_show(unsigned long var, char *page)
36 return sysfs_emit(page, "%lu\n", var);
40 queue_var_store(unsigned long *var, const char *page, size_t count)
45 err = kstrtoul(page, 10, &v);
47 return -EINVAL;
54 static ssize_t queue_requests_show(struct gendisk *disk, char *page)
58 mutex_lock(&disk->queue->elevator_lock);
59 ret = queue_var_show(disk->queue->nr_requests, page);
60 mutex_unlock(&disk->queue->elevator_lock);
65 queue_requests_store(struct gendisk *disk, const char *page, size_t count)
70 struct request_queue *q = disk->queue;
73 return -EINVAL;
75 ret = queue_var_store(&nr, page, count);
80 mutex_lock(&q->elevator_lock);
84 err = blk_mq_update_nr_requests(disk->queue, nr);
87 mutex_unlock(&q->elevator_lock);
92 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
96 mutex_lock(&disk->queue->limits_lock);
97 ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
98 mutex_unlock(&disk->queue->limits_lock);
104 queue_ra_store(struct gendisk *disk, const char *page, size_t count)
109 struct request_queue *q = disk->queue;
111 ret = queue_var_store(&ra_kb, page, count);
115 * ->ra_pages is protected by ->limits_lock because it is usually
118 mutex_lock(&q->limits_lock);
120 disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
121 mutex_unlock(&q->limits_lock);
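queue_ra_show() and queue_ra_store() above back the queue's read_ahead_kb sysfs attribute: the value exposed to user space is in KiB, and the PAGE_SHIFT - 10 shifts convert between that KiB value and the page count kept in disk->bdi->ra_pages. A minimal user-space sketch of that interface follows; it is illustrative only (not part of this file), and the device name "sda" is just an example.

/*
 * Illustrative user-space sketch (not part of blk-sysfs.c): read the
 * current readahead size in KiB and write back a doubled value.
 * "sda" is an example device name; writing normally requires root.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/read_ahead_kb";
	unsigned long ra_kb;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%lu", &ra_kb) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("read_ahead_kb = %lu\n", ra_kb);

	/* queue_ra_store() turns this KiB value back into pages. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%lu\n", ra_kb * 2);
	fclose(f);
	return 0;
}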
128 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
130 return queue_var_show(disk->queue->limits._field, page); \
154 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
156 return sysfs_emit(page, "%llu\n", \
157 (unsigned long long)disk->queue->limits._field << \
171 static ssize_t queue_##_field##_show(struct gendisk *disk, char *page) \
173 return queue_var_show(disk->queue->limits._field >> 1, page); \
180 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
182 return sysfs_emit(page, "%d\n", _val); \
188 QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
191 const char *page, size_t count, struct queue_limits *lim)
196 ret = queue_var_store(&max_discard_bytes, page, count);
200 if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
201 return -EINVAL;
204 return -EINVAL;
206 lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
211 const char *page, size_t count, struct queue_limits *lim)
216 ret = queue_var_store(&max_zeroes_bytes, page, count);
220 max_hw_zeroes_bytes = lim->max_hw_wzeroes_unmap_sectors << SECTOR_SHIFT;
222 return -EINVAL;
224 lim->max_user_wzeroes_unmap_sectors = max_zeroes_bytes >> SECTOR_SHIFT;
229 queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
235 ret = queue_var_store(&max_sectors_kb, page, count);
239 lim->max_user_sectors = max_sectors_kb << 1;
243 static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
249 ret = queue_var_store(&val, page, count);
254 lim->features |= feature;
256 lim->features &= ~feature;
261 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
263 return sysfs_emit(page, "%u\n", \
264 !!(disk->queue->limits.features & _feature)); \
267 const char *page, size_t count, struct queue_limits *lim) \
269 return queue_feature_store(disk, page, count, lim, _feature); \
278 static ssize_t queue_##_name##_show(struct gendisk *disk, char *page) \
280 return sysfs_emit(page, "%u\n", \
281 !!(disk->queue->limits.features & _feature)); \
287 static ssize_t queue_poll_show(struct gendisk *disk, char *page)
289 if (queue_is_mq(disk->queue))
290 return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
292 return sysfs_emit(page, "%u\n",
293 !!(disk->queue->limits.features & BLK_FEAT_POLL));
296 static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
298 if (blk_queue_is_zoned(disk->queue))
299 return sysfs_emit(page, "host-managed\n");
300 return sysfs_emit(page, "none\n");
303 static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
305 return queue_var_show(disk_nr_zones(disk), page);
308 static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
310 return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
314 const char *page, size_t count, struct queue_limits *lim)
319 ret = queue_var_store(&ios, page, count);
324 lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
326 lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
330 static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
332 return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
333 blk_queue_noxmerges(disk->queue), page);
336 static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
341 struct request_queue *q = disk->queue;
342 ssize_t ret = queue_var_store(&nm, page, count);
359 static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
361 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
362 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);
364 return queue_var_show(set << force, page);
368 queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
370 ssize_t ret = -EINVAL;
372 struct request_queue *q = disk->queue;
376 ret = queue_var_store(&val, page, count);
402 static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
408 static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
413 struct request_queue *q = disk->queue;
416 if (!(q->limits.features & BLK_FEAT_POLL)) {
417 ret = -EINVAL;
428 static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
430 return sysfs_emit(page, "%u\n",
431 jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
434 static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
439 struct request_queue *q = disk->queue;
441 err = kstrtou32(page, 10, &val);
443 return -EINVAL;
452 static ssize_t queue_wc_show(struct gendisk *disk, char *page)
454 if (blk_queue_write_cache(disk->queue))
455 return sysfs_emit(page, "write back\n");
456 return sysfs_emit(page, "write through\n");
459 static int queue_wc_store(struct gendisk *disk, const char *page,
464 if (!strncmp(page, "write back", 10)) {
466 } else if (!strncmp(page, "write through", 13) ||
467 !strncmp(page, "none", 4)) {
470 return -EINVAL;
474 lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
476 lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
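queue_wc_show() and queue_wc_store() above implement the write_cache attribute. The store side accepts only the literal strings "write back", "write through", or "none" and rejects anything else with -EINVAL; disabling the cache sets BLK_FLAG_WRITE_CACHE_DISABLED in the queue limits, and re-enabling it clears the flag. A short illustrative sketch of toggling it from user space (again not part of this file, with "sda" as an example device):

/*
 * Illustrative user-space sketch (not part of blk-sysfs.c): switch the
 * exported cache mode to "write through".  "sda" is an example device.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/write_cache", "w");

	if (!f) {
		perror("write_cache");
		return 1;
	}
	/* "write through" (or "none") sets BLK_FLAG_WRITE_CACHE_DISABLED;
	 * "write back" clears it again. */
	fputs("write through\n", f);
	return fclose(f) ? 1 : 0;
}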
573 static ssize_t queue_var_store64(s64 *var, const char *page)
578 err = kstrtos64(page, 10, &v);
586 static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
589 struct request_queue *q = disk->queue;
591 mutex_lock(&disk->rqos_state_mutex);
593 ret = -EINVAL;
598 ret = sysfs_emit(page, "0\n");
602 ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
604 mutex_unlock(&disk->rqos_state_mutex);
608 static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
611 struct request_queue *q = disk->queue;
617 ret = queue_var_store64(&val, page);
620 if (val < -1)
621 return -EINVAL;
633 if (val == -1)
648 mutex_lock(&disk->rqos_state_mutex);
650 mutex_unlock(&disk->rqos_state_mutex);
662 /* Common attributes for bio-based and request-based queues. */
665 * Attributes which are protected with q->limits_lock.
721 /* Request-based queue attributes that are not relevant for bio-based queues. */
725 * q->sysfs_lock.
745 struct request_queue *q = disk->queue;
752 return attr->mode;
759 struct request_queue *q = disk->queue;
764 if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
767 return attr->mode;
783 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
788 if (!entry->show && !entry->show_limit)
789 return -EIO;
791 if (entry->show_limit) {
794 mutex_lock(&disk->queue->limits_lock);
795 res = entry->show_limit(disk, page);
796 mutex_unlock(&disk->queue->limits_lock);
800 return entry->show(disk, page);
805 const char *page, size_t length)
809 struct request_queue *q = disk->queue;
811 if (!entry->store_limit && !entry->store)
812 return -EIO;
814 if (entry->store_limit) {
819 res = entry->store_limit(disk, page, length, &lim);
831 return entry->store(disk, page, length);
858 struct request_queue *q = disk->queue;
860 mutex_lock(&q->debugfs_mutex);
862 debugfs_remove_recursive(q->debugfs_dir);
863 q->debugfs_dir = NULL;
864 q->sched_debugfs_dir = NULL;
865 q->rqos_debugfs_dir = NULL;
866 mutex_unlock(&q->debugfs_mutex);
870 * blk_register_queue - register a block layer queue with sysfs
875 struct request_queue *q = disk->queue;
878 kobject_init(&disk->queue_kobj, &blk_queue_ktype);
879 ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
888 mutex_lock(&q->sysfs_lock);
890 mutex_lock(&q->debugfs_mutex);
891 q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
894 mutex_unlock(&q->debugfs_mutex);
911 kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
912 if (q->elevator)
913 kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
914 mutex_unlock(&q->sysfs_lock);
918 * request_queues for non-existent devices. Shutting down a fully
923 * request_queues for non-existent devices never get registered.
926 percpu_ref_switch_to_percpu(&q->q_usage_counter);
934 mutex_unlock(&q->sysfs_lock);
938 kobject_put(&disk->queue_kobj);
943 * blk_unregister_queue - counterpart of blk_register_queue()
951 struct request_queue *q = disk->queue;
956 /* Return early if disk->queue was never registered. */
965 mutex_lock(&q->sysfs_lock);
967 mutex_unlock(&q->sysfs_lock);
977 mutex_lock(&q->sysfs_lock);
979 mutex_unlock(&q->sysfs_lock);
982 kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
983 kobject_del(&disk->queue_kobj);
989 kobject_put(&disk->queue_kobj);