// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an 8 bit gen associated with it; this gen corresponds to
 * the gen in btree pointers - they must match for the pointer to be
 * considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, the discard happens when a bucket moves from
 * the free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on
 * the unused list move to the free list, optionally being discarded in the
 * process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one bucket from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo|random)() find buckets that are available to
 * be invalidated, and then invalidate them and stick them on the free_inc
 * list - in lru, fifo or random order.
 */
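/*
 * Illustrative sketch of the gen match rule described above (not part of the
 * driver): a pointer into bucket b is stale exactly when the gen it recorded
 * no longer matches b's current gen, i.e.
 *
 *	stale = PTR_GEN(k, i) != PTR_BUCKET(c, k, i)->gen;
 *
 * so incrementing b->gen invalidates every extant pointer into b at once.
 * The real check is ptr_stale() in bcache.h, which also accounts for gen
 * wraparound via gen_after().
 */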
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
		((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) && can_inc_bucket_gen(b));
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	b->reclaimable_in_gc = 0;
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines the order in which we reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in the
 * bucket, and for that multiplication to make sense we have to scale the
 * bucket priorities - the bucket with the smallest prio ends up worth 1/8th
 * of what INITIAL_PRIO is worth.
 */
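/*
 * Worked example of the scaling (illustrative numbers; INITIAL_PRIO is 32768
 * in bcache.h): with ca->set->min_prio == 0, the constant term below is
 * 32768 / 8 == 4096, so a bucket already at the minimum prio still weighs
 *
 *	(0 + 4096) * GC_SECTORS_USED(b)
 *
 * while a freshly written bucket weighs (32768 + 4096) * GC_SECTORS_USED(b),
 * giving roughly the 1/8th ratio described above.
 */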
#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}
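/*
 * Note that if the RESERVE_PRIO fifo is full, the loop above retries every
 * reserve in enum order (RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC,
 * RESERVE_NONE in bcache.h), so a bucket is only refused - making the
 * allocator thread block in allocator_wait() - when every freelist is full.
 */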
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}
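/*
 * Hedged usage sketch, not part of the driver: what a minimal caller of
 * bch_bucket_alloc() looks like. The helper name and reserve choice are
 * illustrative only; bucket_lock must be held, and a negative return means
 * no bucket was available.
 */
static inline long example_bucket_alloc(struct cache *ca)
{
	long r;

	lockdep_assert_held(&ca->set->bucket_lock);

	/* wait == false: fail fast rather than sleeping on bucket_wait */
	r = bch_bucket_alloc(ca, RESERVE_NONE, false);
	if (r < 0)
		return r;	/* caller must retry or give up */

	/* r indexes ca->buckets; the bucket comes back pinned (pin == 1) */
	return r;
}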
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b < 0)
		return -1;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
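/*
 * Hedged usage sketch, not part of the driver: allocating a fresh bucket into
 * a key, the pattern callers such as btree node allocation follow. The helper
 * name and the reserve/wait arguments are illustrative only; the key carries
 * a refcount on the bucket until it is inserted or dropped with bkey_put().
 */
static inline int example_alloc_into_key(struct cache_set *c, struct bkey *k)
{
	/* takes and releases c->bucket_lock internally */
	if (bch_bucket_alloc_set(c, RESERVE_NONE, k, true))
		return -1;

	/* k->ptr[0] now points at the first sector of the new bucket */
	return 0;
}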
/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they get mixed into a bucket with dirty sectors from a cached device, that
 * bucket stays marked dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you start Firefox at the same time you're copying a bunch of files.
 * Firefox will likely end up being fairly hot and stay in the cache awhile,
 * but the data you copied might not be; if you wrote all that data to the same
 * buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
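/*
 * Restating the matching order in pick_data_bucket() above: flash only volume
 * and cached device streams never share a bucket; within a stream, an open
 * bucket whose last write ended exactly where the current write begins (a
 * sequential match on the key) wins over one that merely shares the write
 * point; and failing both we fall back to the least recently used open
 * bucket, refilling it from @alloc if it has no space left.
 */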
/*
 * Allocates some space in the cache to write to, updates k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&c->cache->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the open bucket's refcount to k.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}