xref: /linux/block/blk-sysfs.c (revision 8b1935e6a36b0967efc593d67ed3aebbfbc1f5b1)
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

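/*
 * Helpers shared by the attribute methods below: each queue attribute is
 * backed by an unsigned long, printed in decimal by queue_var_show() and
 * parsed back with simple_strtoul() by queue_var_store().
 */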
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

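/*
 * Writing nr_requests adjusts the request allocation limit under the queue
 * lock and re-evaluates the congestion and "queue full" state, waking any
 * tasks waiting for a free request once there is room again.
 */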
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

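/*
 * read_ahead_kb is exposed in kilobytes but stored as a page count in the
 * backing_dev_info, hence the (PAGE_CACHE_SHIFT - 10) shifts in the two
 * handlers below.
 */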
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

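/*
 * max_sectors_kb may be changed at runtime, but only within the window
 * [one page, max_hw_sectors_kb]; anything outside that range is rejected
 * with -EINVAL.
 */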
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

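/*
 * This attribute is published as "rotational", so the value seen from user
 * space is the inverse of the internal QUEUE_FLAG_NONROT flag: writing 0
 * marks the queue non-rotational (e.g. an SSD), non-zero marks it rotational.
 */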
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

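/*
 * "nomerges" is a three-state knob: 0 leaves request merging fully enabled,
 * 1 sets QUEUE_FLAG_NOXMERGES (skip the extended merge lookups), and 2 sets
 * QUEUE_FLAG_NOMERGES (disable merge attempts altogether).
 */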
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

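/*
 * rq_affinity mirrors QUEUE_FLAG_SAME_COMP.  The store path depends on the
 * generic SMP completion helpers and is compiled out (returning -EINVAL)
 * when CONFIG_USE_GENERIC_SMP_HELPERS is not set.
 */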
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

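/*
 * iostats toggles QUEUE_FLAG_IO_STAT, i.e. whether I/O accounting is
 * performed for requests on this queue.
 */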
static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

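/*
 * sysfs attribute descriptors: each entry ties a file name and mode under
 * /sys/block/<disk>/queue/ to the show/store handlers defined above.
 */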
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_iostats_show,
	.store = queue_iostats_store,
};

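/*
 * Adding another queue attribute follows the same pattern: write a show
 * (and optionally store) handler, wrap it in a queue_sysfs_entry, and list
 * the entry in default_attrs[] below.  A minimal sketch, with purely
 * hypothetical "example" names that are not part of this file:
 *
 *	static ssize_t queue_example_show(struct request_queue *q, char *page)
 *	{
 *		return queue_var_show(q->nr_requests, page);
 *	}
 *
 *	static struct queue_sysfs_entry queue_example_entry = {
 *		.attr = {.name = "example", .mode = S_IRUGO },
 *		.show = queue_example_show,
 *	};
 */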
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

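/*
 * Generic sysfs dispatchers: both take q->sysfs_lock and return -ENOENT if
 * the queue has already been marked QUEUE_FLAG_DEAD, so the per-attribute
 * handlers never run against a queue that is being torn down.
 */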
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobject belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

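/*
 * blk_register_queue() exposes the queue kobject as the "queue" directory
 * below the disk's device in sysfs and, for request-based drivers (those
 * with a request_fn), also registers the elevator attributes;
 * blk_unregister_queue() undoes both.
 */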
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}