raid5-ppl.c: 498495dba268b20e8eadd7fe93c140c68b6cc9d2 (old) vs. 1532d9e87e8b2377f12929f9e40724d5fbe6ecc5 (new)
Lines added in the new revision are marked "+", removed lines "-"; unmarked lines are common to both.
 /*
  * Partial Parity Log for closing the RAID5 write hole
  * Copyright (c) 2017, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *
--- 71 unchanged lines hidden (view full) ---
  *
  * An io_unit is used to gather stripes until it is submitted or becomes full
  * (if the maximum number of entries or size of PPL is reached). Another io_unit
  * can't be submitted until the previous has completed (PPL and stripe
  * data+parity is written). The log->io_list tracks all io_units of a log
  * (for a single member disk). New io_units are added to the end of the list
  * and the first io_unit is submitted, if it is not submitted already.
  * The current io_unit accepting new stripes is always at the end of the list.
+ *
+ * If write-back cache is enabled for any of the disks in the array, its data
+ * must be flushed before next io_unit is submitted.
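  * The flush is driven from the completion path: once every stripe of an
  * io_unit has been written to the RAID disks, ppl_do_flush() sends an empty
  * REQ_PREFLUSH bio to each member disk recorded in log->disk_flush_bitmap,
  * and the io_unit is finished only after all of those flushes complete.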
  */

 #define PPL_SPACE_SIZE	(128 * 1024)

 struct ppl_conf {
 	struct mddev *mddev;

 	/* array of child logs, one for each raid disk */
 	struct ppl_log *child_logs;
 	int count;

 	int block_size;		/* the logical block size used for data_sector
 				 * in ppl_header_entry */
 	u32 signature;		/* raid array identifier */
 	atomic64_t seq;		/* current log write sequence number */

 	struct kmem_cache *io_kc;
 	mempool_t *io_pool;
 	struct bio_set *bs;
+	struct bio_set *flush_bs;
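 	/* flush_bs backs the empty PREFLUSH bios built in ppl_do_flush(); it
 	 * is created without BIOSET_NEED_BVECS because those bios carry no
 	 * data pages. */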

 	/* used only for recovery */
 	int recovered_entries;
 	int mismatch_count;

 	/* stripes to retry if failed to allocate io_unit */
 	struct list_head no_mem_stripes;
 	spinlock_t no_mem_stripes_lock;
--- 8 unchanged lines hidden (view full) ---
 	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
 					 * always at the end of io_list */
 	spinlock_t io_list_lock;
 	struct list_head io_list;	/* all io_units of this log */

 	sector_t next_io_sector;
 	unsigned int entry_space;
 	bool use_multippl;
+	bool wb_cache_on;
+	unsigned long disk_flush_bitmap;
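 	/* disk_flush_bitmap holds one bit per member disk in a single
 	 * unsigned long, which is what caps the supported array width at
 	 * BITS_PER_LONG disks; see the max_disks check in ppl_init_log(). */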
 };

 #define PPL_IO_INLINE_BVECS 32

 struct ppl_io_unit {
 	struct ppl_log *log;

 	struct page *header_page;	/* for ppl_header */

 	unsigned int entries_count;	/* number of entries in ppl_header */
 	unsigned int pp_size;		/* current total size of partial parity */

 	u64 seq;			/* sequence number of this log write */
 	struct list_head log_sibling;	/* log->io_list */

 	struct list_head stripe_list;	/* stripes added to the io_unit */
 	atomic_t pending_stripes;	/* how many stripes not written to raid */
+	atomic_t pending_flushes;	/* how many disk flushes are in progress */

 	bool submitted;			/* true if write to log started */

 	/* inline bio and its biovec for submitting the iounit */
 	struct bio bio;
 	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
 };
--- 88 unchanged lines hidden (view full) ---
 	header_page = io->header_page;
 	memset(io, 0, sizeof(*io));
 	io->header_page = header_page;

 	io->log = log;
 	INIT_LIST_HEAD(&io->log_sibling);
 	INIT_LIST_HEAD(&io->stripe_list);
 	atomic_set(&io->pending_stripes, 0);
+	atomic_set(&io->pending_flushes, 0);
 	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

 	pplhdr = page_address(io->header_page);
 	clear_page(pplhdr);
 	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
 	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

 	io->seq = atomic64_add_return(1, &ppl_conf->seq);
--- 210 unchanged lines hidden (view full) ---
 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);

 	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
 		 (unsigned long long)log->next_io_sector);

 	if (log->use_multippl)
 		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

+	WARN_ON(log->disk_flush_bitmap != 0);
+
 	list_for_each_entry(sh, &io->stripe_list, log_list) {
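 		/*
 		 * Note which member disks this io_unit is about to write
 		 * (R5_Wantwrite) on a child log whose disk has a volatile
 		 * write-back cache (wb_cache_on); ppl_do_flush() will flush
 		 * exactly those disks.
 		 */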
+		for (i = 0; i < sh->disks; i++) {
+			struct r5dev *dev = &sh->dev[i];
+
+			if ((ppl_conf->child_logs[i].wb_cache_on) &&
+			    (test_bit(R5_Wantwrite, &dev->flags))) {
+				set_bit(i, &log->disk_flush_bitmap);
+			}
+		}
+
 		/* entries for full stripe writes have no partial parity */
 		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
 			continue;

 		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
 			struct bio *prev = bio;

 			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
--- 48 unchanged lines hidden (view full) ---
 		mutex_unlock(&log->io_mutex);
 	}
 }

 static void ppl_io_unit_finished(struct ppl_io_unit *io)
 {
 	struct ppl_log *log = io->log;
 	struct ppl_conf *ppl_conf = log->ppl_conf;
+	struct r5conf *conf = ppl_conf->mddev->private;
 	unsigned long flags;

 	pr_debug("%s: seq: %llu\n", __func__, io->seq);

 	local_irq_save(flags);

 	spin_lock(&log->io_list_lock);
 	list_del(&io->log_sibling);
--- 9 unchanged lines hidden (view full) ---
 				struct stripe_head, log_list);
 		list_del_init(&sh->log_list);
 		set_bit(STRIPE_HANDLE, &sh->state);
 		raid5_release_stripe(sh);
 	}
 	spin_unlock(&ppl_conf->no_mem_stripes_lock);

 	local_irq_restore(flags);
+
+	wake_up(&conf->wait_for_quiescent);
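 	/* The wake_up pairs with ppl_quiesce() below, which sleeps on
 	 * wait_for_quiescent until no submitted io_unit remains on any child
 	 * log's io_list. */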
 }

+static void ppl_flush_endio(struct bio *bio)
+{
+	struct ppl_io_unit *io = bio->bi_private;
+	struct ppl_log *log = io->log;
+	struct ppl_conf *ppl_conf = log->ppl_conf;
+	struct r5conf *conf = ppl_conf->mddev->private;
+	char b[BDEVNAME_SIZE];
+
+	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+
+	if (bio->bi_status) {
+		struct md_rdev *rdev;
+
+		rcu_read_lock();
+		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
+		if (rdev)
+			md_error(rdev->mddev, rdev);
+		rcu_read_unlock();
+	}
+
+	bio_put(bio);
+
+	if (atomic_dec_and_test(&io->pending_flushes)) {
+		ppl_io_unit_finished(io);
+		md_wakeup_thread(conf->mddev->thread);
+	}
+}
+
+static void ppl_do_flush(struct ppl_io_unit *io)
+{
+	struct ppl_log *log = io->log;
+	struct ppl_conf *ppl_conf = log->ppl_conf;
+	struct r5conf *conf = ppl_conf->mddev->private;
+	int raid_disks = conf->raid_disks;
+	int flushed_disks = 0;
+	int i;
+
+	atomic_set(&io->pending_flushes, raid_disks);
+
+	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
+		struct md_rdev *rdev;
+		struct block_device *bdev = NULL;
+
+		rcu_read_lock();
+		rdev = rcu_dereference(conf->disks[i].rdev);
+		if (rdev && !test_bit(Faulty, &rdev->flags))
+			bdev = rdev->bdev;
+		rcu_read_unlock();
+
+		if (bdev) {
+			struct bio *bio;
+			char b[BDEVNAME_SIZE];
+
+			bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+			bio_set_dev(bio, bdev);
+			bio->bi_private = io;
+			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+			bio->bi_end_io = ppl_flush_endio;
+
+			pr_debug("%s: dev: %s\n", __func__,
+				 bio_devname(bio, b));
+
+			submit_bio(bio);
+			flushed_disks++;
+		}
+	}
+
+	log->disk_flush_bitmap = 0;
+
+	for (i = flushed_disks ; i < raid_disks; i++) {
+		if (atomic_dec_and_test(&io->pending_flushes))
+			ppl_io_unit_finished(io);
+	}
+}
+
+static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
+					    struct ppl_log *log)
+{
+	struct ppl_io_unit *io;
+
+	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
+				      log_sibling);
+
+	return !io || !io->submitted;
+}
+
+void ppl_quiesce(struct r5conf *conf, int quiesce)
+{
+	struct ppl_conf *ppl_conf = conf->log_private;
+	int i;
+
+	if (quiesce) {
+		for (i = 0; i < ppl_conf->count; i++) {
+			struct ppl_log *log = &ppl_conf->child_logs[i];
+
+			spin_lock_irq(&log->io_list_lock);
+			wait_event_lock_irq(conf->wait_for_quiescent,
+					    ppl_no_io_unit_submitted(conf, log),
+					    log->io_list_lock);
+			spin_unlock_irq(&log->io_list_lock);
+		}
+	}
+}
+
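 /*
  * Completion accounting in ppl_do_flush() uses a pre-charge-and-drain scheme:
  * pending_flushes starts at raid_disks (the maximum possible number of flush
  * completions), each flush endio consumes one, and the trailing loop drains
  * the slots of disks that were not flushed, so whichever decrement reaches
  * zero, flush endio or drain loop, finishes the io_unit exactly once. A
  * minimal standalone C11 sketch of the same pattern (all names hypothetical,
  * not part of the kernel code above; completions run inline here, whereas in
  * the kernel they run from bio completion):
  */
 #include <stdatomic.h>
 #include <stdio.h>

 #define MAX_SLOTS 8	/* plays the role of raid_disks */

 static atomic_int pending;

 static void finish(void)
 {
 	printf("io_unit finished\n");
 }

 /* one completion, the ppl_flush_endio() role; fetch_sub returning 1 means
  * this call took the counter to zero (atomic_dec_and_test semantics) */
 static void complete_one(void)
 {
 	if (atomic_fetch_sub(&pending, 1) == 1)
 		finish();
 }

 int main(void)
 {
 	int issued = 3;	/* requests actually sent, <= MAX_SLOTS */
 	int i;

 	atomic_store(&pending, MAX_SLOTS);	/* pre-charge */

 	for (i = 0; i < issued; i++)
 		complete_one();

 	/* drain the unused slots, the ppl_do_flush() trailing loop */
 	for (i = issued; i < MAX_SLOTS; i++)
 		complete_one();

 	return 0;
 }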
 void ppl_stripe_write_finished(struct stripe_head *sh)
 {
 	struct ppl_io_unit *io;

 	io = sh->ppl_io;
 	sh->ppl_io = NULL;

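 	/*
 	 * The last pending stripe decides how the io_unit ends: finish it
 	 * directly, or, if any written member disk has a volatile write-back
 	 * cache (disk_flush_bitmap non-empty), flush those caches first and
 	 * let ppl_flush_endio() finish it.
 	 */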
-	if (io && atomic_dec_and_test(&io->pending_stripes))
-		ppl_io_unit_finished(io);
+	if (io && atomic_dec_and_test(&io->pending_stripes)) {
+		if (io->log->disk_flush_bitmap)
+			ppl_do_flush(io);
+		else
+			ppl_io_unit_finished(io);
+	}
 }

 static void ppl_xor(int size, struct page *page1, struct page *page2)
 {
 	struct async_submit_ctl submit;
 	struct dma_async_tx_descriptor *tx;
 	struct page *xor_srcs[] = { page1, page2 };

--- 516 unchanged lines hidden (view full) ---
 {
 	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
 	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

 	kfree(ppl_conf->child_logs);

 	if (ppl_conf->bs)
 		bioset_free(ppl_conf->bs);
+	if (ppl_conf->flush_bs)
+		bioset_free(ppl_conf->flush_bs);
 	mempool_destroy(ppl_conf->io_pool);
 	kmem_cache_destroy(ppl_conf->io_kc);

 	kfree(ppl_conf);
 }

 void ppl_exit_log(struct r5conf *conf)
 {
--- 49 unchanged lines hidden (view full) ---

 	rdev->ppl.size = ppl_size_new;

 	return 0;
 }

 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
 {
+	struct request_queue *q;
+
 	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
 				      PPL_HEADER_SIZE) * 2) {
 		log->use_multippl = true;
 		set_bit(MD_HAS_MULTIPLE_PPLS,
 			&log->ppl_conf->mddev->flags);
 		log->entry_space = PPL_SPACE_SIZE;
 	} else {
 		log->use_multippl = false;
 		log->entry_space = (log->rdev->ppl.size << 9) -
 				   PPL_HEADER_SIZE;
 	}
 	log->next_io_sector = rdev->ppl.sector;
+
+	q = bdev_get_queue(rdev->bdev);
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		log->wb_cache_on = true;
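 	/* QUEUE_FLAG_WC means the member device reports a volatile write
 	 * cache; such a disk must have its cache flushed before an io_unit
 	 * that wrote to it can be considered stable. */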
 }

 int ppl_init_log(struct r5conf *conf)
 {
 	struct ppl_conf *ppl_conf;
 	struct mddev *mddev = conf->mddev;
 	int ret = 0;
+	int max_disks;
 	int i;
-	bool need_cache_flush = false;

 	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
 		 mdname(conf->mddev));

 	if (PAGE_SIZE != 4096)
 		return -EINVAL;

 	if (mddev->level != 5) {
--- 9 unchanged lines hidden (view full) ---
 	}

 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
 		pr_warn("md/raid:%s PPL is not compatible with journal\n",
 			mdname(mddev));
 		return -EINVAL;
 	}

+	max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+		BITS_PER_BYTE;
+	if (conf->raid_disks > max_disks) {
+		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+			mdname(mddev), max_disks);
+		return -EINVAL;
+	}
+
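 	/* FIELD_SIZEOF(...) * BITS_PER_BYTE works out to BITS_PER_LONG: 64
 	 * disks on 64-bit kernels, 32 on 32-bit, one disk_flush_bitmap bit
 	 * per member disk. */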
 	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
 	if (!ppl_conf)
 		return -ENOMEM;

 	ppl_conf->mddev = mddev;

 	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
 	if (!ppl_conf->io_kc) {
--- 9 unchanged lines hidden (view full) ---
 	}

 	ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
 	if (!ppl_conf->bs) {
 		ret = -ENOMEM;
 		goto err;
 	}

+	ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
+	if (!ppl_conf->flush_bs) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ppl_conf->count = conf->raid_disks;
 	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
 				       GFP_KERNEL);
 	if (!ppl_conf->child_logs) {
 		ret = -ENOMEM;
 		goto err;
 	}

--- 15 unchanged lines hidden (view full) ---
 		mutex_init(&log->io_mutex);
 		spin_lock_init(&log->io_list_lock);
 		INIT_LIST_HEAD(&log->io_list);

 		log->ppl_conf = ppl_conf;
 		log->rdev = rdev;

 		if (rdev) {
-			struct request_queue *q;
-
 			ret = ppl_validate_rdev(rdev);
 			if (ret)
 				goto err;

-			q = bdev_get_queue(rdev->bdev);
-			if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-				need_cache_flush = true;
 			ppl_init_child_log(log, rdev);
 		}
 	}

-	if (need_cache_flush)
-		pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
-			mdname(mddev));
-
 	/* load and possibly recover the logs from the member disks */
 	ret = ppl_load(ppl_conf);

 	if (ret) {
 		goto err;
 	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
 		   ppl_conf->recovered_entries > 0 &&
 		   ppl_conf->mismatch_count == 0) {
--- 58 unchanged lines hidden ---