/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}
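/*
 * The show-only helpers below expose the queue's I/O topology limits
 * (logical/physical block size, minimum/optimal I/O size, discard
 * limits) as read-only sysfs files, e.g.
 * /sys/block/<dev>/queue/logical_block_size.
 */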
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
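/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair for a single queue
 * flag; 'neg' inverts the value as seen from user space.  For example,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to queue_show_nonrot()
 * and queue_store_nonrot(), which export QUEUE_FLAG_NONROT inverted as
 * the "rotational" attribute.
 */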
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		if (val == 2)
			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
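/*
 * Each queue_sysfs_entry below binds one file under
 * /sys/block/<disk>/queue/ to its show and (optionally) store handler.
 * For instance, "nr_requests" maps to queue_requests_show() and
 * queue_requests_store(), so a write such as
 * "echo 256 > /sys/block/sda/queue/nr_requests" ends up in the latter.
 */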
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
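/*
 * queue_attr_show()/queue_attr_store() are the sysfs_ops entry points.
 * They recover the request_queue from the embedded kobject, take
 * q->sysfs_lock, and refuse access with -ENOENT once the queue has been
 * marked dead, before dispatching to the per-attribute handler.
 */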
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:	the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
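/*
 * blk_unregister_queue() undoes blk_register_queue(); the two are
 * typically reached via add_disk() and del_gendisk() respectively.
 */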
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}