// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	int (*load_module)(struct gendisk *disk, const char *page, size_t count);
	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
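
/*
 * Note that queue_var_store() follows the usual sysfs convention of
 * returning the number of bytes consumed on success.  Input above
 * UINT_MAX is rejected up front since callers typically assign the
 * parsed value to 32-bit queue limits.
 */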

static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->queue->nr_requests, page);
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(disk->queue))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		return err;

	return ret;
}
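
/*
 * Writes below BLKDEV_MIN_RQ are silently rounded up rather than
 * rejected, and only blk-mq queues accept a write at all.  A typical
 * invocation from userspace (the device name is just an example):
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 */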

static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
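
/*
 * read_ahead_kb is exposed in KiB while bdi->ra_pages is a page count,
 * hence the shifts by (PAGE_SHIFT - 10).  On a kernel with 4 KiB pages
 * (PAGE_SHIFT == 12) that is a shift by 2: for instance, 512 KiB of
 * readahead corresponds to 128 pages.
 */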

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)
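
/*
 * Each expansion above generates a trivial accessor; as a sketch,
 * QUEUE_SYSFS_LIMIT_SHOW(max_segments) becomes:
 *
 *	static ssize_t queue_max_segments_show(struct gendisk *disk,
 *					       char *page)
 *	{
 *		return queue_var_show(disk->queue->limits.max_segments, page);
 *	}
 */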

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%llu\n",					\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
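
/*
 * These limits are kept in 512-byte sectors internally but reported in
 * bytes, so the value is widened to 64 bits first and then shifted left
 * by SECTOR_SHIFT (9).  E.g. a limit of 8 sectors is shown as 4096.
 */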

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)
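
/*
 * Two 512-byte sectors make one KiB, so shifting the sector count right
 * by one converts it to the KiB units that max_sectors_kb and
 * max_hw_sectors_kb advertise.
 */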

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count)
{
	unsigned long max_discard_bytes;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}
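
/*
 * The granularity check above relies on discard_granularity being a
 * power of two: masking with (granularity - 1) only isolates the
 * misaligned low bits in that case.  As an example, with a 4096-byte
 * granularity a write of 1048576 (1 MiB) passes while 6144 is rejected.
 */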

/*
 * For zone append queue_max_zone_append_sectors does not just return the
 * underlying queue limits, but actually contains a calculation.  Because of
 * that we can't simply use QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES here.
 */
static ssize_t queue_zone_append_max_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%llu\n",
		(u64)queue_max_zone_append_sectors(disk->queue) <<
			SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	struct queue_limits lim;
	ssize_t ret;
	int err;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	lim.max_user_sectors = max_sectors_kb << 1;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return ret;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, blk_features_t feature)
{
	struct queue_limits lim;
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	lim = queue_limits_start_update(disk->queue);
	if (val)
		lim.features |= feature;
	else
		lim.features &= ~feature;
	ret = queue_limits_commit_update(disk->queue, &lim);
	if (ret)
		return ret;
	return count;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static ssize_t queue_##_name##_store(struct gendisk *disk,		\
		const char *page, size_t count)				\
{									\
	return queue_feature_store(disk, page, count, _feature);	\
}
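
/*
 * Each QUEUE_SYSFS_FEATURE() expansion wires up a 0/1 attribute pair:
 * the show side reports whether the feature bit is set, and the store
 * side routes through queue_feature_store() so the flip goes through a
 * proper limits update.  From userspace (device name is illustrative):
 *
 *	echo 0 > /sys/block/sda/queue/rotational
 */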

QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sprintf(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL);
QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sprintf(page, "host-managed\n");
	return sprintf(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);

	return ret;
}
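
/*
 * nomerges encodes two flags as one value: 0 re-enables all merging,
 * 1 disables only the more expensive extended (lookup-based) merges
 * (QUEUE_FLAG_NOXMERGES), and 2 disables merging entirely
 * (QUEUE_FLAG_NOMERGES).  For example:
 *
 *	echo 2 > /sys/block/sda/queue/nomerges
 */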

static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
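
/*
 * rq_affinity thus accepts three values: 0 places no restriction on the
 * completion CPU, 1 steers completions to the CPU group the request was
 * submitted from (QUEUE_FLAG_SAME_COMP), and 2 additionally forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).  The
 * show side folds the two flags back into 0/1/2 via set << force.
 */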

static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				      size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	if (!(disk->queue->limits.features & BLK_FEAT_POLL))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));

	return count;
}
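
/*
 * io_timeout is exchanged in milliseconds and converted to jiffies for
 * the internal rq_timeout; zero is rejected as a nonsensical timeout.
 * For instance, to allow requests 60 seconds before timing out:
 *
 *	echo 60000 > /sys/block/sda/queue/io_timeout
 */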

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sprintf(page, "write back\n");
	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
			      size_t count)
{
	struct queue_limits lim;
	bool disable;
	int err;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	lim = queue_limits_start_update(disk->queue);
	if (disable)
		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	err = queue_limits_commit_update(disk->queue, &lim);
	if (err)
		return err;
	return count;
}
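
/*
 * "none" is accepted alongside "write through" as a spelling for
 * disabling the write-back cache, so either of these works from
 * userspace:
 *
 *	echo "write through" > /sys/block/sda/queue/write_cache
 *	echo "none" > /sys/block/sda/queue/write_cache
 */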

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name)	\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.load_module = _prefix##_load_module,		\
	.store	= _prefix##_store,			\
}
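
/*
 * As a sketch of what these generate, QUEUE_RW_ENTRY(queue_ra,
 * "read_ahead_kb") expands to:
 *
 *	static struct queue_sysfs_entry queue_ra_entry = {
 *		.attr	= { .name = "read_ahead_kb", .mode = 0644 },
 *		.show	= queue_ra_show,
 *		.store	= queue_ra_store,
 *	};
 */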

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_rotational, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_add_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	if (!wbt_rq_qos(disk->queue))
		return -EINVAL;

	if (wbt_disabled(disk->queue))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n",
		div_u64(wbt_get_min_lat(disk->queue), 1000));
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);

	return count;
}
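
/*
 * wbt_lat_usec is read and written in microseconds but stored in
 * nanoseconds, hence the multiply by 1000 above.  Writing -1 restores
 * the device-class default via wbt_default_latency_nsec().  E.g.:
 *
 *	echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */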

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&disk->queue->sysfs_lock);
	res = entry->show(disk, page);
	mutex_unlock(&disk->queue->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	/*
	 * If the attribute needs to load a module, do it before freezing the
	 * queue to ensure that the module file can be read when the request
	 * queue is the one for the device storing the module file.
	 */
	if (entry->load_module) {
		res = entry->load_module(disk, page, length);
		if (res)
			return res;
	}

	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(disk, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);
	return res;
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}