xref: /linux/block/blk-sysfs.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

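/*
 * Each attribute exported under /sys/block/<disk>/queue/ is described by
 * one of these entries: the sysfs attribute itself plus the show/store
 * handlers that operate directly on the owning request_queue.
 */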
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

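/*
 * Helpers shared by most attributes: format a single unsigned long for
 * show, and parse one back for store.  Note that queue_var_store() does
 * no range or error checking of its own; it always consumes the whole
 * write and leaves validation to its callers.
 */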
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

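/*
 * Changing nr_requests also moves the congestion on/off thresholds, so
 * after storing the new value the sync and async request lists are
 * re-evaluated: congestion and queue-full state are set or cleared to
 * match, and waiters are woken once the queue is no longer full.
 */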
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

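/*
 * read_ahead_kb is kept in the backing_dev_info as a page count, so the
 * show/store pair converts between kilobytes and pages using
 * PAGE_CACHE_SHIFT - 10.
 */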
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

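/*
 * Discard limits: the granularity is reported in bytes as stored, while
 * the maximum is kept in 512-byte sectors and shifted up by 9 so that
 * discard_max_bytes exports a byte count.
 */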
static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

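/*
 * max_sectors is stored internally in 512-byte sectors, hence the shifts
 * by one when converting to and from kilobytes.  Writes are rejected
 * with -EINVAL if they exceed the hardware limit or fall below one page.
 */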
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

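/*
 * QUEUE_SYSFS_BIT_FNS() expands to a show/store pair for a single queue
 * flag.  The "neg" argument inverts the exported value, which is how
 * "rotational" presents the inverse of QUEUE_FLAG_NONROT.
 */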
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

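/*
 * nomerges exports two flags as one value: 2 sets QUEUE_FLAG_NOMERGES
 * (all merging disabled), 1 sets only QUEUE_FLAG_NOXMERGES (the extended
 * merge lookups are skipped), and 0 clears both.
 */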
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

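/*
 * rq_affinity steers completion work back toward the submitting CPU:
 * 0 clears both flags, 1 sets QUEUE_FLAG_SAME_COMP, and 2 additionally
 * sets QUEUE_FLAG_SAME_FORCE to force completion on the exact submitting
 * CPU.  The store only takes effect when the generic SMP helpers are
 * built in.
 */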
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

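/*
 * The queue_sysfs_entry definitions below bind the handlers above to the
 * file names that appear under /sys/block/<disk>/queue/.  Read-only
 * attributes omit the store hook and use mode S_IRUGO; writable ones add
 * S_IWUSR.
 */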
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

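/*
 * queue_attr_show() and queue_attr_store() are the sysfs_ops entry
 * points: they map the kobject back to its request_queue, dispatch to
 * the attribute's handler under q->sysfs_lock, and return -ENOENT once
 * the queue has been marked dead.
 */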
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	kmem_cache_free(blk_requestq_cachep, q);
}

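/*
 * A minimal, hypothetical driver-side sketch of the pairing described in
 * the kernel-doc above (my_request_fn and my_lock are illustrative names,
 * not part of this file):
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);
 *
 * Once the last reference to the queue is dropped, the kobject release
 * ends up here in blk_release_queue().
 */
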
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

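/*
 * blk_register_queue() creates the "queue" directory beneath the disk's
 * device kobject and, for request-based drivers (those with a
 * request_fn), registers the elevator's sysfs attributes as well.
 * blk_unregister_queue() tears the same pieces down again.
 */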
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}