/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

struct uuid_entry_v0 {
	uint8_t		uuid[16];
	uint8_t		label[32];
	uint32_t	first_reg;
	uint32_t	last_reg;
	uint32_t	invalidated;
	uint32_t	pad;
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major, bcache_minor;
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)

static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->block_size = le16_to_cpu(s->block_size);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write.cl);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_sector = SB_SECTOR;
	bio->bi_rw = REQ_SYNC|REQ_META;
	bio->bi_size = SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write.cl;
	struct bio *bio = &dc->sb_bio;

	closure_lock(&dc->sb_write, parent);

	bio_reset(bio);
	bio->bi_bdev = dc->bdev;
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return(cl);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write.cl);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write.cl;
	struct cache *ca;
	unsigned i;

	closure_lock(&c->sb_write, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev = ca->bdev;
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return(cl);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write.cl;
	struct uuid_entry *u;
	unsigned i;

	BUG_ON(!parent);
	closure_lock(&c->uuid_write, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw = REQ_SYNC|REQ_META|rw;
		bio->bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
		 pkey(&c->uuid_bucket));

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return(cl);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_ptr_invalid(c, 1, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	__bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *  16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_sector = bucket * ca->sb.bucket_size;
	bio->bi_bdev = ca->bdev;
	bio->bi_rw = REQ_SYNC|REQ_META|rw;
	bio->bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu", \
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(ca);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);
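			/*
			 * prio_io() waits for the read to complete, so the
			 * prio_set for this bucket is now in ca->disk_buckets
			 * and can be validated below.
			 */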

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(ca))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (atomic_read(&d->closing))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!atomic_xchg(&d->closing, 1))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	unsigned i;
	struct cache *ca;

	sysfs_remove_link(&d->c->kobj, d->name);
	sysfs_remove_link(&d->kobj, "cache");

	for_each_cache(ca, d->c, i)
		bd_unlink_disk_holder(ca->bdev, d->disk);
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&d->detaching)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);

		atomic_set(&d->detaching, 0);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk)
		put_disk(d->disk);

	bio_split_pool_free(&d->bio_split_hook);
	if (d->unaligned_bvec)
		mempool_destroy(d->unaligned_bvec);
	if (d->bio_split)
		bioset_free(d->bio_split);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size)
{
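	/*
	 * Common initialization for cached devices and flash-only volumes:
	 * allocate the bio pools/split hook, the gendisk and a bio-based
	 * request queue, and set permissive queue limits.
	 */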
	struct request_queue *q;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
	    bio_split_pool_init(&d->bio_split_hook) ||
	    !(d->disk = alloc_disk(1)) ||
	    !(q = blk_alloc_queue(GFP_KERNEL)))
		return -ENOMEM;

	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= bcache_minor++;
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	q->limits.max_discard_sectors	= UINT_MAX;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
#if 0
	char *env[] = { "SYMLINK=label" , NULL };
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
#endif
	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!atomic_read(&dc->disk.detaching));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&dc->disk.closing))
		return;

	if (atomic_xchg(&dc->disk.detaching, 1))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

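/*
 * Attach a backing device to a cache set: verify the set UUID matches, find
 * or allocate a slot in the set's uuid array for it, rewrite both
 * superblocks, then start the bcache device (queueing writeback first if the
 * backing device was left dirty).
 */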
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		if (dc->bdev->bd_disk)
			blk_sync_queue(bdev_get_queue(dc->bdev));

		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	closure_init_unlocked(&dc->sb_write);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_merge		= true;
	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	ret = bcache_device_init(&dc->disk, block_size);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info.ra_pages =
		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
		    q->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio);
	dc->sb_bio.bi_max_vecs	= 1;
	dc->sb_bio.bi_io_vec	= dc->sb_bio.bi_inline_vecs;
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	bcache_device_unlink(d);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

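/*
 * Create and start the block device for a single flash-only volume described
 * by @u (a uuid_entry with UUID_FLASH_ONLY set).
 */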
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c)))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	set_capacity(d->disk, u->sectors);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));

	kfree(c->fill_iter);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
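	/*
	 * Final teardown, run once every device attached to the set has
	 * dropped its reference on c->caching: stop the allocator threads,
	 * write out dirty btree nodes, then release the ref on c->cl so
	 * cache_set_free() can run.
	 */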
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct btree *b;

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up(&c->alloc_wait);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_write(b, true, NULL);

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc, *t;
	size_t i;

	mutex_lock(&bch_register_lock);

	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			bch_cached_dev_detach(dc);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
			bcache_device_stop(c->devices[i]);

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

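	/*
	 * c->cl starts out "running" with the initial ref taken by
	 * closure_init(); mark it stopped and drop that ref here so the cache
	 * set can eventually be freed once everything else lets go of c->cl.
	 */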
	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages		= c->sb.bucket_size / PAGE_SECTORS;
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	init_waitqueue_head(&c->alloc_wait);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->fill_lock);
	mutex_init(&c->sort_lock);
	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
	closure_init_unlocked(&c->uuid_write);
	spin_lock_init(&c->btree_read_time_lock);
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c))
		goto err;

	c->fill_iter->size = sb->bucket_size / sb->block_size;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	unsigned i;

	struct btree_op op;
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal, &op))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &op.cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c, &op))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     system_wq, &c->cl);

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal, &op);
	} else {
		pr_notice("invalidating existing data");
		/* Don't want invalidate_buckets() to queue a gc yet */
		closure_lock(&c->gc, NULL);

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     ca->alloc_workqueue, &c->cl);

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		wake_up(&c->alloc_wait);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, &op.cl);
		if (IS_ERR_OR_NULL(c->root))
			goto err_unlock_gc;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_write(c->root, true, &op);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &op.cl);

		/* Unlock */
		closure_set_stopped(&c->gc.cl);
		closure_put(&c->gc.cl);
	}

	closure_sync(&op.cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err_unlock_gc:
	closure_set_stopped(&c->gc.cl);
	closure_put(&c->gc.cl);
err:
	closure_sync(&op.cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, err);
}

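/*
 * A cache device can only join a set whose superblock geometry (block size,
 * bucket size, number of devices in the set) matches its own.
 */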
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags		= ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (ca->set)
		ca->set->cache[ca->sb.nr_this_dev] = NULL;

	bch_cache_allocator_exit(ca);

	bio_split_pool_free(&ca->bio_split_hook);

	if (ca->alloc_workqueue)
		destroy_workqueue(ca->alloc_workqueue);

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->unused);
	free_fifo(&ca->free_inc);
	free_fifo(&ca->free);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev)) {
		blk_sync_queue(bdev_get_queue(ca->bdev));
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(ca);
	module_put(THIS_MODULE);
}

static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	INIT_LIST_HEAD(&ca->discards);

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);

	if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	if (bch_cache_allocator_init(ca))
		goto err;

	return 0;
err:
	kobject_put(&ca->kobj);
	return -ENOMEM;
}

static void register_cache(struct cache_sb *sb, struct page *sb_page,
			   struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs	= 1;
	ca->sb_bio.bi_io_vec	= ca->sb_bio.bi_inline_vecs;
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	if (cache_alloc(sb, ca) != 0)
		goto err;

	err = "error creating kobject";
	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
		goto err;

	err = register_cache_set(ca);
	if (err)
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	kobject_put(&ca->kobj);
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	mutex_lock(&bch_register_lock);

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY))
			err = "device busy";
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		register_bdev(sb, sb_page, bdev, dc);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		register_cache(sb, sb_page, bdev, ca);
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	mutex_unlock(&bch_register_lock);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	if (attr != &ksysfs_register_quiet)
		pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_writeback_exit();
	bch_request_exit();
	bch_btree_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0)
		return bcache_major;

	if (!(bcache_wq = create_workqueue("bcache")) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_btree_init() ||
	    bch_request_init() ||
	    bch_writeback_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);
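/*
 * Usage sketch (device paths are illustrative): devices are registered from
 * userspace by writing their path to the sysfs attribute created above, e.g.
 *
 *	echo /dev/sdb > /sys/fs/bcache/register
 *
 * which ends up in register_bcache() and, depending on the superblock read
 * from the device, in register_bdev() or register_cache().
 */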