12025cf9eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2f6bed0efSShaohua Li /*
3f6bed0efSShaohua Li * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4b4c625c6SSong Liu * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
5f6bed0efSShaohua Li */
6f6bed0efSShaohua Li #include <linux/kernel.h>
7f6bed0efSShaohua Li #include <linux/wait.h>
8f6bed0efSShaohua Li #include <linux/blkdev.h>
9f6bed0efSShaohua Li #include <linux/slab.h>
10f6bed0efSShaohua Li #include <linux/raid/md_p.h>
115cb2fbd6SShaohua Li #include <linux/crc32c.h>
12f6bed0efSShaohua Li #include <linux/random.h>
13ce1ccd07SShaohua Li #include <linux/kthread.h>
1403b047f4SSong Liu #include <linux/types.h>
15f6bed0efSShaohua Li #include "md.h"
16f6bed0efSShaohua Li #include "raid5.h"
17935fe098SMike Snitzer #include "md-bitmap.h"
1870d466f7SSong Liu #include "raid5-log.h"
19f6bed0efSShaohua Li 
20f6bed0efSShaohua Li /*
21f6bed0efSShaohua Li * metadata/data are stored on disk in 4k units (blocks) regardless of the
22f6bed0efSShaohua Li * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
23f6bed0efSShaohua Li */
24f6bed0efSShaohua Li #define BLOCK_SECTORS (8)
25effe6ee7SSong Liu #define BLOCK_SECTOR_SHIFT (3)
26f6bed0efSShaohua Li 
270576b1c6SShaohua Li /*
28a39f7afdSSong Liu * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
29a39f7afdSSong Liu *
30a39f7afdSSong Liu * In write through mode, the reclaim runs every log->max_free_space.
31a39f7afdSSong Liu * This prevents recovery from having to scan for too long.
320576b1c6SShaohua Li */
330576b1c6SShaohua Li #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
340576b1c6SShaohua Li #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
350576b1c6SShaohua Li 
36a39f7afdSSong Liu /* wake up reclaim thread periodically */
37a39f7afdSSong Liu #define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
38a39f7afdSSong Liu /* start flush with these full stripes */
3984890c03SShaohua Li #define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
40a39f7afdSSong Liu /* reclaim stripes in groups */
41a39f7afdSSong Liu #define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
42a39f7afdSSong Liu 
43c38d29b3SChristoph Hellwig /*
44c38d29b3SChristoph Hellwig * We only need 2 bios per I/O unit to make progress, but ensure we
45c38d29b3SChristoph Hellwig * have a few more available so things don't get too tight.
46c38d29b3SChristoph Hellwig */
47c38d29b3SChristoph Hellwig #define R5L_POOL_SIZE 4
48c38d29b3SChristoph Hellwig 
492c7da14bSSong Liu static char *r5c_journal_mode_str[] = {"write-through",
502c7da14bSSong Liu "write-back"};
512ded3703SSong Liu /*
522ded3703SSong Liu * raid5 cache state machine
532ded3703SSong Liu *
549b69173eSJackieLiu * With the RAID cache, each stripe works in two phases:
552ded3703SSong Liu * - caching phase
562ded3703SSong Liu * - writing-out phase
572ded3703SSong Liu *
582ded3703SSong Liu * These two phases are controlled by bit STRIPE_R5C_CACHING:
592ded3703SSong Liu * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
602ded3703SSong Liu * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
612ded3703SSong Liu *
622ded3703SSong Liu * When there is no journal, or the journal is in write-through mode,
632ded3703SSong Liu * the stripe is always in writing-out phase.
642ded3703SSong Liu *
652ded3703SSong Liu * For write-back journal, the stripe is sent to caching phase on write
662ded3703SSong Liu * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
672ded3703SSong Liu * the write-out phase by clearing STRIPE_R5C_CACHING.
682ded3703SSong Liu *
692ded3703SSong Liu * Stripes in caching phase do not write the raid disks. Instead, all
702ded3703SSong Liu * writes are committed from the log device. Therefore, a stripe in
712ded3703SSong Liu * caching phase handles writes as:
722ded3703SSong Liu * - write to log device
732ded3703SSong Liu * - return IO
742ded3703SSong Liu *
752ded3703SSong Liu * Stripes in writing-out phase handle writes as:
762ded3703SSong Liu * - calculate parity
772ded3703SSong Liu * - write pending data and parity to journal
782ded3703SSong Liu * - write data and parity to raid disks
792ded3703SSong Liu * - return IO for pending writes
802ded3703SSong Liu */
812ded3703SSong Liu 
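/*
 * Example flow (write-back mode; a sketch of the phases described above):
 * a 4kB write lands in the journal and its bio completes while the stripe
 * is still in the caching phase. Only when the stripe is later reclaimed
 * does r5c_make_stripe_write_out() clear STRIPE_R5C_CACHING; parity is
 * then computed and data plus parity are written to the raid disks.
 */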
82f6bed0efSShaohua Li struct r5l_log {
83f6bed0efSShaohua Li struct md_rdev *rdev;
84f6bed0efSShaohua Li 
85f6bed0efSShaohua Li u32 uuid_checksum;
86f6bed0efSShaohua Li 
87f6bed0efSShaohua Li sector_t device_size; /* log device size, rounded to
88f6bed0efSShaohua Li * BLOCK_SECTORS */
890576b1c6SShaohua Li sector_t max_free_space; /* reclaim runs if free space is at
900576b1c6SShaohua Li * this size */
91f6bed0efSShaohua Li 
92f6bed0efSShaohua Li sector_t last_checkpoint; /* log tail. where recovery scan
93f6bed0efSShaohua Li * starts from */
94f6bed0efSShaohua Li u64 last_cp_seq; /* log tail sequence */
95f6bed0efSShaohua Li 
96f6bed0efSShaohua Li sector_t log_start; /* log head. where new data appends */
97f6bed0efSShaohua Li u64 seq; /* log head sequence */
98f6bed0efSShaohua Li 
9917036461SChristoph Hellwig sector_t next_checkpoint;
10017036461SChristoph Hellwig 
101f6bed0efSShaohua Li struct mutex io_mutex;
102f6bed0efSShaohua Li struct r5l_io_unit *current_io; /* current io_unit accepting new data */
103f6bed0efSShaohua Li 
104f6bed0efSShaohua Li spinlock_t io_list_lock;
105f6bed0efSShaohua Li struct list_head running_ios; /* io_units which are still running,
106f6bed0efSShaohua Li * and have not yet been completely
107f6bed0efSShaohua Li * written to the log */
108f6bed0efSShaohua Li struct list_head io_end_ios; /* io_units which have been completely
109f6bed0efSShaohua Li * written to the log but not yet written
110f6bed0efSShaohua Li * to the RAID */
111a8c34f91SShaohua Li struct list_head flushing_ios; /* io_units which are waiting for log
112a8c34f91SShaohua Li * cache flush */
11304732f74SChristoph Hellwig struct list_head finished_ios; /* io_units which have settled down in the log disk */
114a8c34f91SShaohua Li struct bio flush_bio;
115f6bed0efSShaohua Li 
1165036c390SChristoph Hellwig struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
1175036c390SChristoph Hellwig 
118f6bed0efSShaohua Li struct kmem_cache *io_kc;
119afeee514SKent Overstreet mempool_t io_pool;
120afeee514SKent Overstreet struct bio_set bs;
121afeee514SKent Overstreet mempool_t meta_pool;
122f6bed0efSShaohua Li 
1230576b1c6SShaohua Li struct md_thread *reclaim_thread;
1240576b1c6SShaohua Li unsigned long reclaim_target; /* amount of space that needs to be
1250576b1c6SShaohua Li * reclaimed. if it's 0, reclaim spaces
1260576b1c6SShaohua Li * used by io_units which are in
1270576b1c6SShaohua Li * IO_UNIT_STRIPE_END state (i.e., reclaim
1280576b1c6SShaohua Li * doesn't wait for a specific io_unit
1290576b1c6SShaohua Li * to switch to IO_UNIT_STRIPE_END
1300576b1c6SShaohua Li * state) */
1310fd22b45SShaohua Li wait_queue_head_t iounit_wait;
1320576b1c6SShaohua Li 
133f6bed0efSShaohua Li struct list_head no_space_stripes; /* pending stripes, log has no space */
134f6bed0efSShaohua Li spinlock_t no_space_stripes_lock;
13556fef7c6SChristoph Hellwig 
13656fef7c6SChristoph Hellwig bool need_cache_flush;
1372ded3703SSong Liu 
1382ded3703SSong Liu /* for r5c_cache */
1392ded3703SSong Liu enum r5c_journal_mode r5c_journal_mode;
140a39f7afdSSong Liu 
141a39f7afdSSong Liu /* all stripes in r5cache, in the order of seq at sh->log_start */
142a39f7afdSSong Liu struct list_head stripe_in_journal_list;
143a39f7afdSSong Liu 
144a39f7afdSSong Liu spinlock_t stripe_in_journal_lock;
145a39f7afdSSong Liu atomic_t stripe_in_journal_count;
1463bddb7f8SSong Liu 
1473bddb7f8SSong Liu /* to submit async io_units, to fulfill ordering of flush */
1483bddb7f8SSong Liu struct work_struct deferred_io_work;
1492e38a37fSSong Liu /* to disable write back while in degraded mode */
1502e38a37fSSong Liu struct work_struct disable_writeback_work;
15103b047f4SSong Liu 
15203b047f4SSong Liu /* for chunk_aligned_read in writeback mode, details below */
15303b047f4SSong Liu spinlock_t tree_lock;
15403b047f4SSong Liu struct radix_tree_root big_stripe_tree;
155f6bed0efSShaohua Li };
156f6bed0efSShaohua Li 
157f6bed0efSShaohua Li /*
15803b047f4SSong Liu * Enable chunk_aligned_read() with write back cache.
15903b047f4SSong Liu *
16003b047f4SSong Liu * Each chunk may contain more than one stripe (for example, a 256kB
16103b047f4SSong Liu * chunk contains 64 4kB pages, so such a chunk contains 64 stripes). For
16203b047f4SSong Liu * chunk_aligned_read, these stripes are grouped into one "big_stripe".
16303b047f4SSong Liu * For each big_stripe, we count how many stripes of this big_stripe
16403b047f4SSong Liu * are in the write back cache. These data are tracked in a radix tree
16503b047f4SSong Liu * (big_stripe_tree). We use the radix_tree item pointer as the counter.
16603b047f4SSong Liu * r5c_tree_index() is used to calculate keys for the radix tree.
16703b047f4SSong Liu *
16803b047f4SSong Liu * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
16903b047f4SSong Liu * big_stripe of each chunk in the tree. If this big_stripe is in the
17003b047f4SSong Liu * tree, chunk_aligned_read() aborts. This look up is protected by
17103b047f4SSong Liu * rcu_read_lock().
17203b047f4SSong Liu *
17303b047f4SSong Liu * It is necessary to remember whether a stripe is counted in
17403b047f4SSong Liu * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
17503b047f4SSong Liu * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
17603b047f4SSong Liu * two flags is set, the stripe is counted in big_stripe_tree. This
17703b047f4SSong Liu * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
17803b047f4SSong Liu * r5c_try_caching_write(); and moving clear_bit of
17903b047f4SSong Liu * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
18003b047f4SSong Liu * r5c_finish_stripe_write_out().
18103b047f4SSong Liu */
18203b047f4SSong Liu 
18303b047f4SSong Liu /*
18403b047f4SSong Liu * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00,
18503b047f4SSong Liu * so it is necessary to left shift the counter by 2 bits before using it
18603b047f4SSong Liu * as the data pointer of the tree.
18703b047f4SSong Liu */
18803b047f4SSong Liu #define R5C_RADIX_COUNT_SHIFT 2
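/*
 * For example (a sketch of the convention above, not a separate helper in
 * this file): a count of 3 is stored in the tree as the item pointer
 * (void *)(3UL << R5C_RADIX_COUNT_SHIFT), and read back by shifting the
 * pointer value right by R5C_RADIX_COUNT_SHIFT.
 */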
18903b047f4SSong Liu 
19003b047f4SSong Liu /*
19103b047f4SSong Liu * calculate the key for big_stripe_tree
19203b047f4SSong Liu *
19303b047f4SSong Liu * sect: align_bi->bi_iter.bi_sector or sh->sector
19403b047f4SSong Liu */
19503b047f4SSong Liu static inline sector_t r5c_tree_index(struct r5conf *conf,
19603b047f4SSong Liu sector_t sect)
19703b047f4SSong Liu {
19852923083SDamien Le Moal sector_div(sect, conf->chunk_sectors);
19903b047f4SSong Liu return sect;
20003b047f4SSong Liu }
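/*
 * Example with assumed numbers: for chunk_sectors == 512 (256kB chunks),
 * sectors 0..511 map to key 0, sectors 512..1023 to key 1, and so on, so
 * every stripe of a chunk shares one big_stripe counter in the tree.
 */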
20103b047f4SSong Liu 
202f6bed0efSShaohua Li /*
203f6bed0efSShaohua Li * an IO range starts from a meta data block and ends at the next meta data
204f6bed0efSShaohua Li * block. The io unit's meta data block tracks the data/parity that follows
205f6bed0efSShaohua Li * it. The io unit is written to the log disk with a normal write; as we
206f6bed0efSShaohua Li * always flush the log disk first and only then start moving data to the
207f6bed0efSShaohua Li * raid disks, there is no requirement to write the io unit with FLUSH/FUA
208f6bed0efSShaohua Li */
209f6bed0efSShaohua Li struct r5l_io_unit {
210f6bed0efSShaohua Li struct r5l_log *log;
211f6bed0efSShaohua Li 
212f6bed0efSShaohua Li struct page *meta_page; /* store meta block */
213f6bed0efSShaohua Li int meta_offset; /* current offset in meta_page */
214f6bed0efSShaohua Li 
215f6bed0efSShaohua Li struct bio *current_bio;/* current_bio accepting new data */
216f6bed0efSShaohua Li 
217f6bed0efSShaohua Li atomic_t pending_stripe;/* how many stripes not flushed to raid */
218f6bed0efSShaohua Li u64 seq; /* seq number of the metablock */
219f6bed0efSShaohua Li sector_t log_start; /* where the io_unit starts */
220f6bed0efSShaohua Li sector_t log_end; /* where the io_unit ends */
221f6bed0efSShaohua Li struct list_head log_sibling; /* log->running_ios */
222f6bed0efSShaohua Li struct list_head stripe_list; /* stripes added to the io_unit */
223f6bed0efSShaohua Li 
224f6bed0efSShaohua Li int state;
2256143e2ceSChristoph Hellwig bool need_split_bio;
2263bddb7f8SSong Liu struct bio *split_bio;
2273bddb7f8SSong Liu 
2283bddb7f8SSong Liu unsigned int has_flush:1; /* include flush request */
2293bddb7f8SSong Liu unsigned int has_fua:1; /* include fua request */
230a9501d74SSong Liu unsigned int has_null_flush:1; /* include null flush request */
231a9501d74SSong Liu unsigned int has_flush_payload:1; /* include flush payload */
2323bddb7f8SSong Liu /*
2333bddb7f8SSong Liu * the io isn't sent yet; a flush/fua io_unit can only be submitted
2343bddb7f8SSong Liu * once it is the first IO in the running_ios list
2353bddb7f8SSong Liu */
2363bddb7f8SSong Liu unsigned int io_deferred:1;
2373bddb7f8SSong Liu 
2383bddb7f8SSong Liu struct bio_list flush_barriers; /* size == 0 flush bios */
239f6bed0efSShaohua Li };
240f6bed0efSShaohua Li 
241f6bed0efSShaohua Li /* r5l_io_unit state */
242f6bed0efSShaohua Li enum r5l_io_unit_state {
243f6bed0efSShaohua Li IO_UNIT_RUNNING = 0, /* accepting new IO */
244f6bed0efSShaohua Li IO_UNIT_IO_START = 1, /* io_unit bio started writing to the log,
245f6bed0efSShaohua Li * doesn't accept new bio */
246f6bed0efSShaohua Li IO_UNIT_IO_END = 2, /* io_unit bio finished writing to the log */
247a8c34f91SShaohua Li IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
248f6bed0efSShaohua Li };
249f6bed0efSShaohua Li 
2502ded3703SSong Liu bool r5c_is_writeback(struct r5l_log *log)
2512ded3703SSong Liu { 2522ded3703SSong Liu return (log != NULL && 2532ded3703SSong Liu log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK); 2542ded3703SSong Liu } 2552ded3703SSong Liu 256f6bed0efSShaohua Li static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) 257f6bed0efSShaohua Li { 258f6bed0efSShaohua Li start += inc; 259f6bed0efSShaohua Li if (start >= log->device_size) 260f6bed0efSShaohua Li start = start - log->device_size; 261f6bed0efSShaohua Li return start; 262f6bed0efSShaohua Li } 263f6bed0efSShaohua Li 264f6bed0efSShaohua Li static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, 265f6bed0efSShaohua Li sector_t end) 266f6bed0efSShaohua Li { 267f6bed0efSShaohua Li if (end >= start) 268f6bed0efSShaohua Li return end - start; 269f6bed0efSShaohua Li else 270f6bed0efSShaohua Li return end + log->device_size - start; 271f6bed0efSShaohua Li } 272f6bed0efSShaohua Li 273f6bed0efSShaohua Li static bool r5l_has_free_space(struct r5l_log *log, sector_t size) 274f6bed0efSShaohua Li { 275f6bed0efSShaohua Li sector_t used_size; 276f6bed0efSShaohua Li 277f6bed0efSShaohua Li used_size = r5l_ring_distance(log, log->last_checkpoint, 278f6bed0efSShaohua Li log->log_start); 279f6bed0efSShaohua Li 280f6bed0efSShaohua Li return log->device_size > used_size + size; 281f6bed0efSShaohua Li } 282f6bed0efSShaohua Li 283f6bed0efSShaohua Li static void __r5l_set_io_unit_state(struct r5l_io_unit *io, 284f6bed0efSShaohua Li enum r5l_io_unit_state state) 285f6bed0efSShaohua Li { 286f6bed0efSShaohua Li if (WARN_ON(io->state >= state)) 287f6bed0efSShaohua Li return; 288f6bed0efSShaohua Li io->state = state; 289f6bed0efSShaohua Li } 290f6bed0efSShaohua Li 2911e6d690bSSong Liu static void 292bd83d0a2SNeilBrown r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev) 2931e6d690bSSong Liu { 2941e6d690bSSong Liu struct bio *wbi, *wbi2; 2951e6d690bSSong Liu 2961e6d690bSSong Liu wbi = dev->written; 2971e6d690bSSong Liu dev->written = NULL; 2981e6d690bSSong Liu while (wbi && wbi->bi_iter.bi_sector < 299c911c46cSYufen Yu dev->sector + RAID5_STRIPE_SECTORS(conf)) { 300c911c46cSYufen Yu wbi2 = r5_next_bio(conf, wbi, dev->sector); 3011e6d690bSSong Liu md_write_end(conf->mddev); 302bd83d0a2SNeilBrown bio_endio(wbi); 3031e6d690bSSong Liu wbi = wbi2; 3041e6d690bSSong Liu } 3051e6d690bSSong Liu } 3061e6d690bSSong Liu 3071e6d690bSSong Liu void r5c_handle_cached_data_endio(struct r5conf *conf, 308bd83d0a2SNeilBrown struct stripe_head *sh, int disks) 3091e6d690bSSong Liu { 3101e6d690bSSong Liu int i; 3111e6d690bSSong Liu 3121e6d690bSSong Liu for (i = sh->disks; i--; ) { 3131e6d690bSSong Liu if (sh->dev[i].written) { 3141e6d690bSSong Liu set_bit(R5_UPTODATE, &sh->dev[i].flags); 315bd83d0a2SNeilBrown r5c_return_dev_pending_writes(conf, &sh->dev[i]); 316e64e4018SAndy Shevchenko md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, 317c911c46cSYufen Yu RAID5_STRIPE_SECTORS(conf), 3181e6d690bSSong Liu !test_bit(STRIPE_DEGRADED, &sh->state), 3191e6d690bSSong Liu 0); 3201e6d690bSSong Liu } 3211e6d690bSSong Liu } 3221e6d690bSSong Liu } 3231e6d690bSSong Liu 324ff875738SArtur Paszkiewicz void r5l_wake_reclaim(struct r5l_log *log, sector_t space); 325ff875738SArtur Paszkiewicz 326a39f7afdSSong Liu /* Check whether we should flush some stripes to free up stripe cache */ 327a39f7afdSSong Liu void r5c_check_stripe_cache_usage(struct r5conf *conf) 328a39f7afdSSong Liu { 329a39f7afdSSong Liu int total_cached; 330a39f7afdSSong Liu 331a39f7afdSSong Liu if (!r5c_is_writeback(conf->log)) 
332a39f7afdSSong Liu return;
333a39f7afdSSong Liu 
334a39f7afdSSong Liu total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
335a39f7afdSSong Liu atomic_read(&conf->r5c_cached_full_stripes);
336a39f7afdSSong Liu 
337a39f7afdSSong Liu /*
338a39f7afdSSong Liu * The following condition is true for either of the following:
339a39f7afdSSong Liu * - stripe cache pressure high:
340a39f7afdSSong Liu * total_cached > 3/4 min_nr_stripes ||
341a39f7afdSSong Liu * empty_inactive_list_nr > 0
342a39f7afdSSong Liu * - stripe cache pressure moderate:
343a39f7afdSSong Liu * total_cached > 1/2 min_nr_stripes
344a39f7afdSSong Liu */
345a39f7afdSSong Liu if (total_cached > conf->min_nr_stripes * 1 / 2 ||
346a39f7afdSSong Liu atomic_read(&conf->empty_inactive_list_nr) > 0)
347a39f7afdSSong Liu r5l_wake_reclaim(conf->log, 0);
348a39f7afdSSong Liu }
349a39f7afdSSong Liu 
350a39f7afdSSong Liu /*
351a39f7afdSSong Liu * flush the cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
352a39f7afdSSong Liu * stripes in the cache
353a39f7afdSSong Liu */
354a39f7afdSSong Liu void r5c_check_cached_full_stripe(struct r5conf *conf)
355a39f7afdSSong Liu {
356a39f7afdSSong Liu if (!r5c_is_writeback(conf->log))
357a39f7afdSSong Liu return;
358a39f7afdSSong Liu 
359a39f7afdSSong Liu /*
360a39f7afdSSong Liu * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
361a39f7afdSSong Liu * or a full stripe (chunk size / 4k stripes).
362a39f7afdSSong Liu */
363a39f7afdSSong Liu if (atomic_read(&conf->r5c_cached_full_stripes) >=
36484890c03SShaohua Li min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
365c911c46cSYufen Yu conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
366a39f7afdSSong Liu r5l_wake_reclaim(conf->log, 0);
367a39f7afdSSong Liu }
368a39f7afdSSong Liu 
369a39f7afdSSong Liu /*
370a39f7afdSSong Liu * Total log space (in sectors) needed to flush all data in cache
371a39f7afdSSong Liu *
37239b99586SSong Liu * To avoid deadlock due to log space, it is necessary to reserve log
37339b99586SSong Liu * space to flush critical stripes (stripes occupying log space near
37439b99586SSong Liu * last_checkpoint). This function helps check how much log space is
37539b99586SSong Liu * required to flush all cached stripes.
37639b99586SSong Liu *
37739b99586SSong Liu * To reduce log space requirements, two mechanisms are used to give cache
37839b99586SSong Liu * flushes higher priority:
37939b99586SSong Liu * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
38039b99586SSong Liu * stripes ALREADY in journal can be flushed w/o pending writes;
38139b99586SSong Liu * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
38239b99586SSong Liu * can be delayed (r5l_add_no_space_stripe).
38339b99586SSong Liu *
38439b99586SSong Liu * In cache flush, the stripe goes through 1 and then 2. For a stripe that
38539b99586SSong Liu * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
38639b99586SSong Liu * pages of journal space. For a stripe that has not passed 1, flushing it
38739b99586SSong Liu * requires (conf->raid_disks + 1) pages of journal space. There are at
38839b99586SSong Liu * most (conf->group_cnt + 1) stripes that have passed 1.
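 * (A worked example with assumed numbers: for raid_disks == 9 and
 * max_degraded == 2, a stripe past step 1 needs at most 3 pages of journal
 * space to flush, while a stripe that has not passed step 1 needs 10.)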
So total journal space 38939b99586SSong Liu * required to flush all cached stripes (in pages) is: 39039b99586SSong Liu * 39139b99586SSong Liu * (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) + 39239b99586SSong Liu * (group_cnt + 1) * (raid_disks + 1) 39339b99586SSong Liu * or 39439b99586SSong Liu * (stripe_in_journal_count) * (max_degraded + 1) + 39539b99586SSong Liu * (group_cnt + 1) * (raid_disks - max_degraded) 396a39f7afdSSong Liu */ 397a39f7afdSSong Liu static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf) 398a39f7afdSSong Liu { 399a39f7afdSSong Liu struct r5l_log *log = conf->log; 400a39f7afdSSong Liu 401a39f7afdSSong Liu if (!r5c_is_writeback(log)) 402a39f7afdSSong Liu return 0; 403a39f7afdSSong Liu 40439b99586SSong Liu return BLOCK_SECTORS * 40539b99586SSong Liu ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) + 40639b99586SSong Liu (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1)); 407a39f7afdSSong Liu } 408a39f7afdSSong Liu 409a39f7afdSSong Liu /* 410a39f7afdSSong Liu * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL 411a39f7afdSSong Liu * 412a39f7afdSSong Liu * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of 413a39f7afdSSong Liu * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log 414a39f7afdSSong Liu * device is less than 2x of reclaim_required_space. 415a39f7afdSSong Liu */ 416a39f7afdSSong Liu static inline void r5c_update_log_state(struct r5l_log *log) 417a39f7afdSSong Liu { 418a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private; 419a39f7afdSSong Liu sector_t free_space; 420a39f7afdSSong Liu sector_t reclaim_space; 421f687a33eSSong Liu bool wake_reclaim = false; 422a39f7afdSSong Liu 423a39f7afdSSong Liu if (!r5c_is_writeback(log)) 424a39f7afdSSong Liu return; 425a39f7afdSSong Liu 426a39f7afdSSong Liu free_space = r5l_ring_distance(log, log->log_start, 427a39f7afdSSong Liu log->last_checkpoint); 428a39f7afdSSong Liu reclaim_space = r5c_log_required_to_flush_cache(conf); 429a39f7afdSSong Liu if (free_space < 2 * reclaim_space) 430a39f7afdSSong Liu set_bit(R5C_LOG_CRITICAL, &conf->cache_state); 431f687a33eSSong Liu else { 432f687a33eSSong Liu if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) 433f687a33eSSong Liu wake_reclaim = true; 434a39f7afdSSong Liu clear_bit(R5C_LOG_CRITICAL, &conf->cache_state); 435f687a33eSSong Liu } 436a39f7afdSSong Liu if (free_space < 3 * reclaim_space) 437a39f7afdSSong Liu set_bit(R5C_LOG_TIGHT, &conf->cache_state); 438a39f7afdSSong Liu else 439a39f7afdSSong Liu clear_bit(R5C_LOG_TIGHT, &conf->cache_state); 440f687a33eSSong Liu 441f687a33eSSong Liu if (wake_reclaim) 442f687a33eSSong Liu r5l_wake_reclaim(log, 0); 443a39f7afdSSong Liu } 444a39f7afdSSong Liu 4452ded3703SSong Liu /* 4462ded3703SSong Liu * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING. 4472ded3703SSong Liu * This function should only be called in write-back mode. 
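 * (In write-through mode stripes never enter the caching phase, so there
 * is nothing to move to write-out; hence the BUG_ON below.)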
4482ded3703SSong Liu */ 449a39f7afdSSong Liu void r5c_make_stripe_write_out(struct stripe_head *sh) 4502ded3703SSong Liu { 4512ded3703SSong Liu struct r5conf *conf = sh->raid_conf; 4522ded3703SSong Liu struct r5l_log *log = conf->log; 4532ded3703SSong Liu 4542ded3703SSong Liu BUG_ON(!r5c_is_writeback(log)); 4552ded3703SSong Liu 4562ded3703SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); 4572ded3703SSong Liu clear_bit(STRIPE_R5C_CACHING, &sh->state); 4581e6d690bSSong Liu 4591e6d690bSSong Liu if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4601e6d690bSSong Liu atomic_inc(&conf->preread_active_stripes); 4611e6d690bSSong Liu } 4621e6d690bSSong Liu 4631e6d690bSSong Liu static void r5c_handle_data_cached(struct stripe_head *sh) 4641e6d690bSSong Liu { 4651e6d690bSSong Liu int i; 4661e6d690bSSong Liu 4671e6d690bSSong Liu for (i = sh->disks; i--; ) 4681e6d690bSSong Liu if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 4691e6d690bSSong Liu set_bit(R5_InJournal, &sh->dev[i].flags); 4701e6d690bSSong Liu clear_bit(R5_LOCKED, &sh->dev[i].flags); 4711e6d690bSSong Liu } 4721e6d690bSSong Liu clear_bit(STRIPE_LOG_TRAPPED, &sh->state); 4731e6d690bSSong Liu } 4741e6d690bSSong Liu 4751e6d690bSSong Liu /* 4761e6d690bSSong Liu * this journal write must contain full parity, 4771e6d690bSSong Liu * it may also contain some data pages 4781e6d690bSSong Liu */ 4791e6d690bSSong Liu static void r5c_handle_parity_cached(struct stripe_head *sh) 4801e6d690bSSong Liu { 4811e6d690bSSong Liu int i; 4821e6d690bSSong Liu 4831e6d690bSSong Liu for (i = sh->disks; i--; ) 4841e6d690bSSong Liu if (test_bit(R5_InJournal, &sh->dev[i].flags)) 4851e6d690bSSong Liu set_bit(R5_Wantwrite, &sh->dev[i].flags); 4862ded3703SSong Liu } 4872ded3703SSong Liu 4882ded3703SSong Liu /* 4892ded3703SSong Liu * Setting proper flags after writing (or flushing) data and/or parity to the 4902ded3703SSong Liu * log device. This is called from r5l_log_endio() or r5l_log_flush_endio(). 4912ded3703SSong Liu */ 4922ded3703SSong Liu static void r5c_finish_cache_stripe(struct stripe_head *sh) 4932ded3703SSong Liu { 4942ded3703SSong Liu struct r5l_log *log = sh->raid_conf->log; 4952ded3703SSong Liu 4962ded3703SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { 4972ded3703SSong Liu BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); 4982ded3703SSong Liu /* 4992ded3703SSong Liu * Set R5_InJournal for parity dev[pd_idx]. This means 5002ded3703SSong Liu * all data AND parity in the journal. For RAID 6, it is 5012ded3703SSong Liu * NOT necessary to set the flag for dev[qd_idx], as the 5022ded3703SSong Liu * two parities are written out together. 
5032ded3703SSong Liu */ 5042ded3703SSong Liu set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); 5051e6d690bSSong Liu } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) { 5061e6d690bSSong Liu r5c_handle_data_cached(sh); 5071e6d690bSSong Liu } else { 5081e6d690bSSong Liu r5c_handle_parity_cached(sh); 5091e6d690bSSong Liu set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); 5101e6d690bSSong Liu } 5112ded3703SSong Liu } 5122ded3703SSong Liu 513d8858f43SChristoph Hellwig static void r5l_io_run_stripes(struct r5l_io_unit *io) 514d8858f43SChristoph Hellwig { 515d8858f43SChristoph Hellwig struct stripe_head *sh, *next; 516d8858f43SChristoph Hellwig 517d8858f43SChristoph Hellwig list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { 518d8858f43SChristoph Hellwig list_del_init(&sh->log_list); 5192ded3703SSong Liu 5202ded3703SSong Liu r5c_finish_cache_stripe(sh); 5212ded3703SSong Liu 522d8858f43SChristoph Hellwig set_bit(STRIPE_HANDLE, &sh->state); 523d8858f43SChristoph Hellwig raid5_release_stripe(sh); 524d8858f43SChristoph Hellwig } 525d8858f43SChristoph Hellwig } 526d8858f43SChristoph Hellwig 52756fef7c6SChristoph Hellwig static void r5l_log_run_stripes(struct r5l_log *log) 52856fef7c6SChristoph Hellwig { 52956fef7c6SChristoph Hellwig struct r5l_io_unit *io, *next; 53056fef7c6SChristoph Hellwig 531efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock); 53256fef7c6SChristoph Hellwig 53356fef7c6SChristoph Hellwig list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { 53456fef7c6SChristoph Hellwig /* don't change list order */ 53556fef7c6SChristoph Hellwig if (io->state < IO_UNIT_IO_END) 53656fef7c6SChristoph Hellwig break; 53756fef7c6SChristoph Hellwig 53856fef7c6SChristoph Hellwig list_move_tail(&io->log_sibling, &log->finished_ios); 53956fef7c6SChristoph Hellwig r5l_io_run_stripes(io); 54056fef7c6SChristoph Hellwig } 54156fef7c6SChristoph Hellwig } 54256fef7c6SChristoph Hellwig 5433848c0bcSChristoph Hellwig static void r5l_move_to_end_ios(struct r5l_log *log) 5443848c0bcSChristoph Hellwig { 5453848c0bcSChristoph Hellwig struct r5l_io_unit *io, *next; 5463848c0bcSChristoph Hellwig 547efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock); 5483848c0bcSChristoph Hellwig 5493848c0bcSChristoph Hellwig list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { 5503848c0bcSChristoph Hellwig /* don't change list order */ 5513848c0bcSChristoph Hellwig if (io->state < IO_UNIT_IO_END) 5523848c0bcSChristoph Hellwig break; 5533848c0bcSChristoph Hellwig list_move_tail(&io->log_sibling, &log->io_end_ios); 5543848c0bcSChristoph Hellwig } 5553848c0bcSChristoph Hellwig } 5563848c0bcSChristoph Hellwig 5573bddb7f8SSong Liu static void __r5l_stripe_write_finished(struct r5l_io_unit *io); 558f6bed0efSShaohua Li static void r5l_log_endio(struct bio *bio) 559f6bed0efSShaohua Li { 560f6bed0efSShaohua Li struct r5l_io_unit *io = bio->bi_private; 5613bddb7f8SSong Liu struct r5l_io_unit *io_deferred; 562f6bed0efSShaohua Li struct r5l_log *log = io->log; 563509ffec7SChristoph Hellwig unsigned long flags; 564a9501d74SSong Liu bool has_null_flush; 565a9501d74SSong Liu bool has_flush_payload; 566f6bed0efSShaohua Li 5674e4cbee9SChristoph Hellwig if (bio->bi_status) 5686e74a9cfSShaohua Li md_error(log->rdev->mddev, log->rdev); 5696e74a9cfSShaohua Li 570f6bed0efSShaohua Li bio_put(bio); 571afeee514SKent Overstreet mempool_free(io->meta_page, &log->meta_pool); 572f6bed0efSShaohua Li 573509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags); 574509ffec7SChristoph Hellwig 
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
575a9501d74SSong Liu 
576a9501d74SSong Liu /*
577a9501d74SSong Liu * if the io doesn't have null_flush or a flush payload,
578a9501d74SSong Liu * it is not safe to access it after releasing io_list_lock.
579a9501d74SSong Liu * Therefore, it is necessary to check the condition with
580a9501d74SSong Liu * the lock held.
581a9501d74SSong Liu */
582a9501d74SSong Liu has_null_flush = io->has_null_flush;
583a9501d74SSong Liu has_flush_payload = io->has_flush_payload;
584a9501d74SSong Liu 
585ea17481fSSong Liu if (log->need_cache_flush && !list_empty(&io->stripe_list))
5863848c0bcSChristoph Hellwig r5l_move_to_end_ios(log);
58756fef7c6SChristoph Hellwig else
58856fef7c6SChristoph Hellwig r5l_log_run_stripes(log);
5893bddb7f8SSong Liu if (!list_empty(&log->running_ios)) {
5903bddb7f8SSong Liu /*
5913bddb7f8SSong Liu * a FLUSH/FUA io_unit was deferred because of ordering; now we
5923bddb7f8SSong Liu * can dispatch it
5933bddb7f8SSong Liu */
5943bddb7f8SSong Liu io_deferred = list_first_entry(&log->running_ios,
5953bddb7f8SSong Liu struct r5l_io_unit, log_sibling);
5963bddb7f8SSong Liu if (io_deferred->io_deferred)
5973bddb7f8SSong Liu schedule_work(&log->deferred_io_work);
5983bddb7f8SSong Liu }
5993bddb7f8SSong Liu 
600509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags);
601509ffec7SChristoph Hellwig 
60256fef7c6SChristoph Hellwig if (log->need_cache_flush)
603f6bed0efSShaohua Li md_wakeup_thread(log->rdev->mddev->thread);
6043bddb7f8SSong Liu 
605a9501d74SSong Liu /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
606a9501d74SSong Liu if (has_null_flush) {
6073bddb7f8SSong Liu struct bio *bi;
6083bddb7f8SSong Liu 
6093bddb7f8SSong Liu WARN_ON(bio_list_empty(&io->flush_barriers));
6103bddb7f8SSong Liu while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
6113bddb7f8SSong Liu bio_endio(bi);
612a9501d74SSong Liu if (atomic_dec_and_test(&io->pending_stripe)) {
613a9501d74SSong Liu __r5l_stripe_write_finished(io);
614a9501d74SSong Liu return;
6153bddb7f8SSong Liu }
616ea17481fSSong Liu }
617a9501d74SSong Liu }
618a9501d74SSong Liu /* decrease pending_stripe for flush payload */
619a9501d74SSong Liu if (has_flush_payload)
620a9501d74SSong Liu if (atomic_dec_and_test(&io->pending_stripe))
6213bddb7f8SSong Liu __r5l_stripe_write_finished(io);
6223bddb7f8SSong Liu }
6233bddb7f8SSong Liu 
6243bddb7f8SSong Liu static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
6253bddb7f8SSong Liu {
6263bddb7f8SSong Liu unsigned long flags;
6273bddb7f8SSong Liu 
6283bddb7f8SSong Liu spin_lock_irqsave(&log->io_list_lock, flags);
6293bddb7f8SSong Liu __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
6303bddb7f8SSong Liu spin_unlock_irqrestore(&log->io_list_lock, flags);
6313bddb7f8SSong Liu 
632bb3338d3SSong Liu /*
633bb3338d3SSong Liu * In case of journal device failures, submit_bio will get an error
634bb3338d3SSong Liu * and call endio, and active stripes will then continue the write
635bb3338d3SSong Liu * process. Therefore, it is not necessary to check the Faulty bit
636bb3338d3SSong Liu * of the journal device here.
637bb3338d3SSong Liu *
638bb3338d3SSong Liu * We can't check split_bio after current_bio is submitted. If
639bb3338d3SSong Liu * io->split_bio is null, after current_bio is submitted, current_bio
640bb3338d3SSong Liu * might already be completed and the io_unit is freed. We submit
641bb3338d3SSong Liu * split_bio first to avoid the issue.
642bb3338d3SSong Liu */ 643bb3338d3SSong Liu if (io->split_bio) { 6443bddb7f8SSong Liu if (io->has_flush) 64520737738SShaohua Li io->split_bio->bi_opf |= REQ_PREFLUSH; 6463bddb7f8SSong Liu if (io->has_fua) 64720737738SShaohua Li io->split_bio->bi_opf |= REQ_FUA; 6483bddb7f8SSong Liu submit_bio(io->split_bio); 6493bddb7f8SSong Liu } 6503bddb7f8SSong Liu 651bb3338d3SSong Liu if (io->has_flush) 652bb3338d3SSong Liu io->current_bio->bi_opf |= REQ_PREFLUSH; 653bb3338d3SSong Liu if (io->has_fua) 654bb3338d3SSong Liu io->current_bio->bi_opf |= REQ_FUA; 655bb3338d3SSong Liu submit_bio(io->current_bio); 656bb3338d3SSong Liu } 657bb3338d3SSong Liu 6583bddb7f8SSong Liu /* deferred io_unit will be dispatched here */ 6593bddb7f8SSong Liu static void r5l_submit_io_async(struct work_struct *work) 6603bddb7f8SSong Liu { 6613bddb7f8SSong Liu struct r5l_log *log = container_of(work, struct r5l_log, 6623bddb7f8SSong Liu deferred_io_work); 6633bddb7f8SSong Liu struct r5l_io_unit *io = NULL; 6643bddb7f8SSong Liu unsigned long flags; 6653bddb7f8SSong Liu 6663bddb7f8SSong Liu spin_lock_irqsave(&log->io_list_lock, flags); 6673bddb7f8SSong Liu if (!list_empty(&log->running_ios)) { 6683bddb7f8SSong Liu io = list_first_entry(&log->running_ios, struct r5l_io_unit, 6693bddb7f8SSong Liu log_sibling); 6703bddb7f8SSong Liu if (!io->io_deferred) 6713bddb7f8SSong Liu io = NULL; 6723bddb7f8SSong Liu else 6733bddb7f8SSong Liu io->io_deferred = 0; 6743bddb7f8SSong Liu } 6753bddb7f8SSong Liu spin_unlock_irqrestore(&log->io_list_lock, flags); 6763bddb7f8SSong Liu if (io) 6773bddb7f8SSong Liu r5l_do_submit_io(log, io); 678f6bed0efSShaohua Li } 679f6bed0efSShaohua Li 6802e38a37fSSong Liu static void r5c_disable_writeback_async(struct work_struct *work) 6812e38a37fSSong Liu { 6822e38a37fSSong Liu struct r5l_log *log = container_of(work, struct r5l_log, 6832e38a37fSSong Liu disable_writeback_work); 6842e38a37fSSong Liu struct mddev *mddev = log->rdev->mddev; 6854d5324f7SNeilBrown struct r5conf *conf = mddev->private; 6864d5324f7SNeilBrown int locked = 0; 6872e38a37fSSong Liu 6882e38a37fSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) 6892e38a37fSSong Liu return; 6902e38a37fSSong Liu pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", 6912e38a37fSSong Liu mdname(mddev)); 69270d466f7SSong Liu 69370d466f7SSong Liu /* wait superblock change before suspend */ 69470d466f7SSong Liu wait_event(mddev->sb_wait, 6954d5324f7SNeilBrown conf->log == NULL || 6964d5324f7SNeilBrown (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && 6974d5324f7SNeilBrown (locked = mddev_trylock(mddev)))); 6984d5324f7SNeilBrown if (locked) { 6992e38a37fSSong Liu mddev_suspend(mddev); 7002e38a37fSSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 7012e38a37fSSong Liu mddev_resume(mddev); 7024d5324f7SNeilBrown mddev_unlock(mddev); 7034d5324f7SNeilBrown } 7042e38a37fSSong Liu } 7052e38a37fSSong Liu 706f6bed0efSShaohua Li static void r5l_submit_current_io(struct r5l_log *log) 707f6bed0efSShaohua Li { 708f6bed0efSShaohua Li struct r5l_io_unit *io = log->current_io; 709f6bed0efSShaohua Li struct r5l_meta_block *block; 710509ffec7SChristoph Hellwig unsigned long flags; 711f6bed0efSShaohua Li u32 crc; 7123bddb7f8SSong Liu bool do_submit = true; 713f6bed0efSShaohua Li 714f6bed0efSShaohua Li if (!io) 715f6bed0efSShaohua Li return; 716f6bed0efSShaohua Li 717f6bed0efSShaohua Li block = page_address(io->meta_page); 718f6bed0efSShaohua Li block->meta_size = cpu_to_le32(io->meta_offset); 7195cb2fbd6SShaohua Li crc = 
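/* checksum the whole meta block, seeded with log->uuid_checksum so that
 * meta blocks left over from a different array never verify */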
crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
720f6bed0efSShaohua Li block->checksum = cpu_to_le32(crc);
721f6bed0efSShaohua Li 
722f6bed0efSShaohua Li log->current_io = NULL;
723509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags);
7243bddb7f8SSong Liu if (io->has_flush || io->has_fua) {
7253bddb7f8SSong Liu if (io != list_first_entry(&log->running_ios,
7263bddb7f8SSong Liu struct r5l_io_unit, log_sibling)) {
7273bddb7f8SSong Liu io->io_deferred = 1;
7283bddb7f8SSong Liu do_submit = false;
7293bddb7f8SSong Liu }
7303bddb7f8SSong Liu }
731509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags);
7323bddb7f8SSong Liu if (do_submit)
7333bddb7f8SSong Liu r5l_do_submit_io(log, io);
734f6bed0efSShaohua Li }
735f6bed0efSShaohua Li 
7366143e2ceSChristoph Hellwig static struct bio *r5l_bio_alloc(struct r5l_log *log)
737b349feb3SChristoph Hellwig {
738609be106SChristoph Hellwig struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
739609be106SChristoph Hellwig REQ_OP_WRITE, GFP_NOIO, &log->bs);
740b349feb3SChristoph Hellwig 
7411e932a37SChristoph Hellwig bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
742b349feb3SChristoph Hellwig 
743b349feb3SChristoph Hellwig return bio;
744b349feb3SChristoph Hellwig }
745b349feb3SChristoph Hellwig 
746c1b99198SChristoph Hellwig static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
747c1b99198SChristoph Hellwig {
748c1b99198SChristoph Hellwig log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
749c1b99198SChristoph Hellwig 
750a39f7afdSSong Liu r5c_update_log_state(log);
751c1b99198SChristoph Hellwig /*
752c1b99198SChristoph Hellwig * If we filled up the log device, start from the beginning again,
753c1b99198SChristoph Hellwig * which will require a new bio.
754c1b99198SChristoph Hellwig *
755c1b99198SChristoph Hellwig * Note: for this to work properly the log size needs to be a multiple
756c1b99198SChristoph Hellwig * of BLOCK_SECTORS.
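 * (Worked example with an assumed size: for device_size == 32768 sectors,
 * advancing log_start from 32760 by BLOCK_SECTORS wraps it to 0 in
 * r5l_ring_add(), and the next appended page then needs a fresh bio.)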
757c1b99198SChristoph Hellwig */ 758c1b99198SChristoph Hellwig if (log->log_start == 0) 7596143e2ceSChristoph Hellwig io->need_split_bio = true; 760c1b99198SChristoph Hellwig 761c1b99198SChristoph Hellwig io->log_end = log->log_start; 762c1b99198SChristoph Hellwig } 763c1b99198SChristoph Hellwig 764f6bed0efSShaohua Li static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) 765f6bed0efSShaohua Li { 766f6bed0efSShaohua Li struct r5l_io_unit *io; 767f6bed0efSShaohua Li struct r5l_meta_block *block; 768f6bed0efSShaohua Li 769afeee514SKent Overstreet io = mempool_alloc(&log->io_pool, GFP_ATOMIC); 7705036c390SChristoph Hellwig if (!io) 7715036c390SChristoph Hellwig return NULL; 7725036c390SChristoph Hellwig memset(io, 0, sizeof(*io)); 7735036c390SChristoph Hellwig 77451039cd0SChristoph Hellwig io->log = log; 77551039cd0SChristoph Hellwig INIT_LIST_HEAD(&io->log_sibling); 77651039cd0SChristoph Hellwig INIT_LIST_HEAD(&io->stripe_list); 7773bddb7f8SSong Liu bio_list_init(&io->flush_barriers); 77851039cd0SChristoph Hellwig io->state = IO_UNIT_RUNNING; 779f6bed0efSShaohua Li 780afeee514SKent Overstreet io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO); 781f6bed0efSShaohua Li block = page_address(io->meta_page); 782e8deb638SChristoph Hellwig clear_page(block); 783f6bed0efSShaohua Li block->magic = cpu_to_le32(R5LOG_MAGIC); 784f6bed0efSShaohua Li block->version = R5LOG_VERSION; 785f6bed0efSShaohua Li block->seq = cpu_to_le64(log->seq); 786f6bed0efSShaohua Li block->position = cpu_to_le64(log->log_start); 787f6bed0efSShaohua Li 788f6bed0efSShaohua Li io->log_start = log->log_start; 789f6bed0efSShaohua Li io->meta_offset = sizeof(struct r5l_meta_block); 7902b8ef16eSChristoph Hellwig io->seq = log->seq++; 791f6bed0efSShaohua Li 7926143e2ceSChristoph Hellwig io->current_bio = r5l_bio_alloc(log); 7936143e2ceSChristoph Hellwig io->current_bio->bi_end_io = r5l_log_endio; 7946143e2ceSChristoph Hellwig io->current_bio->bi_private = io; 795b349feb3SChristoph Hellwig bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0); 796f6bed0efSShaohua Li 797c1b99198SChristoph Hellwig r5_reserve_log_entry(log, io); 798f6bed0efSShaohua Li 799f6bed0efSShaohua Li spin_lock_irq(&log->io_list_lock); 800f6bed0efSShaohua Li list_add_tail(&io->log_sibling, &log->running_ios); 801f6bed0efSShaohua Li spin_unlock_irq(&log->io_list_lock); 802f6bed0efSShaohua Li 803f6bed0efSShaohua Li return io; 804f6bed0efSShaohua Li } 805f6bed0efSShaohua Li 806f6bed0efSShaohua Li static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) 807f6bed0efSShaohua Li { 80822581f58SChristoph Hellwig if (log->current_io && 80922581f58SChristoph Hellwig log->current_io->meta_offset + payload_size > PAGE_SIZE) 810f6bed0efSShaohua Li r5l_submit_current_io(log); 811f6bed0efSShaohua Li 8125036c390SChristoph Hellwig if (!log->current_io) { 813f6bed0efSShaohua Li log->current_io = r5l_new_meta(log); 8145036c390SChristoph Hellwig if (!log->current_io) 8155036c390SChristoph Hellwig return -ENOMEM; 8165036c390SChristoph Hellwig } 8175036c390SChristoph Hellwig 818f6bed0efSShaohua Li return 0; 819f6bed0efSShaohua Li } 820f6bed0efSShaohua Li 821f6bed0efSShaohua Li static void r5l_append_payload_meta(struct r5l_log *log, u16 type, 822f6bed0efSShaohua Li sector_t location, 823f6bed0efSShaohua Li u32 checksum1, u32 checksum2, 824f6bed0efSShaohua Li bool checksum2_valid) 825f6bed0efSShaohua Li { 826f6bed0efSShaohua Li struct r5l_io_unit *io = log->current_io; 827f6bed0efSShaohua Li struct r5l_payload_data_parity *payload; 828f6bed0efSShaohua Li 
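/*
 * payload->size below is recorded in sectors: each 4kB page is 8 sectors,
 * so a data payload (one page, one checksum) gets 8 while a RAID6 parity
 * payload (two pages, two checksums) gets 16.
 */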
829f6bed0efSShaohua Li payload = page_address(io->meta_page) + io->meta_offset; 830f6bed0efSShaohua Li payload->header.type = cpu_to_le16(type); 831f6bed0efSShaohua Li payload->header.flags = cpu_to_le16(0); 832f6bed0efSShaohua Li payload->size = cpu_to_le32((1 + !!checksum2_valid) << 833f6bed0efSShaohua Li (PAGE_SHIFT - 9)); 834f6bed0efSShaohua Li payload->location = cpu_to_le64(location); 835f6bed0efSShaohua Li payload->checksum[0] = cpu_to_le32(checksum1); 836f6bed0efSShaohua Li if (checksum2_valid) 837f6bed0efSShaohua Li payload->checksum[1] = cpu_to_le32(checksum2); 838f6bed0efSShaohua Li 839f6bed0efSShaohua Li io->meta_offset += sizeof(struct r5l_payload_data_parity) + 840f6bed0efSShaohua Li sizeof(__le32) * (1 + !!checksum2_valid); 841f6bed0efSShaohua Li } 842f6bed0efSShaohua Li 843f6bed0efSShaohua Li static void r5l_append_payload_page(struct r5l_log *log, struct page *page) 844f6bed0efSShaohua Li { 845f6bed0efSShaohua Li struct r5l_io_unit *io = log->current_io; 846f6bed0efSShaohua Li 8476143e2ceSChristoph Hellwig if (io->need_split_bio) { 8483bddb7f8SSong Liu BUG_ON(io->split_bio); 8493bddb7f8SSong Liu io->split_bio = io->current_bio; 8506143e2ceSChristoph Hellwig io->current_bio = r5l_bio_alloc(log); 8513bddb7f8SSong Liu bio_chain(io->current_bio, io->split_bio); 8523bddb7f8SSong Liu io->need_split_bio = false; 853f6bed0efSShaohua Li } 854f6bed0efSShaohua Li 8556143e2ceSChristoph Hellwig if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) 8566143e2ceSChristoph Hellwig BUG(); 8576143e2ceSChristoph Hellwig 858c1b99198SChristoph Hellwig r5_reserve_log_entry(log, io); 859f6bed0efSShaohua Li } 860f6bed0efSShaohua Li 861ea17481fSSong Liu static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) 862ea17481fSSong Liu { 863ea17481fSSong Liu struct mddev *mddev = log->rdev->mddev; 864ea17481fSSong Liu struct r5conf *conf = mddev->private; 865ea17481fSSong Liu struct r5l_io_unit *io; 866ea17481fSSong Liu struct r5l_payload_flush *payload; 867ea17481fSSong Liu int meta_size; 868ea17481fSSong Liu 869ea17481fSSong Liu /* 870ea17481fSSong Liu * payload_flush requires extra writes to the journal. 
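 * (r5l_get_meta() below may even have to open a whole new io_unit for it.)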
871ea17481fSSong Liu * To avoid handling the extra IO in quiesce, just skip 872ea17481fSSong Liu * flush_payload 873ea17481fSSong Liu */ 874ea17481fSSong Liu if (conf->quiesce) 875ea17481fSSong Liu return; 876ea17481fSSong Liu 877ea17481fSSong Liu mutex_lock(&log->io_mutex); 878ea17481fSSong Liu meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64); 879ea17481fSSong Liu 880ea17481fSSong Liu if (r5l_get_meta(log, meta_size)) { 881ea17481fSSong Liu mutex_unlock(&log->io_mutex); 882ea17481fSSong Liu return; 883ea17481fSSong Liu } 884ea17481fSSong Liu 885ea17481fSSong Liu /* current implementation is one stripe per flush payload */ 886ea17481fSSong Liu io = log->current_io; 887ea17481fSSong Liu payload = page_address(io->meta_page) + io->meta_offset; 888ea17481fSSong Liu payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH); 889ea17481fSSong Liu payload->header.flags = cpu_to_le16(0); 890ea17481fSSong Liu payload->size = cpu_to_le32(sizeof(__le64)); 891ea17481fSSong Liu payload->flush_stripes[0] = cpu_to_le64(sect); 892ea17481fSSong Liu io->meta_offset += meta_size; 893a9501d74SSong Liu /* multiple flush payloads count as one pending_stripe */ 894a9501d74SSong Liu if (!io->has_flush_payload) { 895a9501d74SSong Liu io->has_flush_payload = 1; 896a9501d74SSong Liu atomic_inc(&io->pending_stripe); 897a9501d74SSong Liu } 898ea17481fSSong Liu mutex_unlock(&log->io_mutex); 899ea17481fSSong Liu } 900ea17481fSSong Liu 9015036c390SChristoph Hellwig static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, 902f6bed0efSShaohua Li int data_pages, int parity_pages) 903f6bed0efSShaohua Li { 904f6bed0efSShaohua Li int i; 905f6bed0efSShaohua Li int meta_size; 9065036c390SChristoph Hellwig int ret; 907f6bed0efSShaohua Li struct r5l_io_unit *io; 908f6bed0efSShaohua Li 909f6bed0efSShaohua Li meta_size = 910f6bed0efSShaohua Li ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) 911f6bed0efSShaohua Li * data_pages) + 912f6bed0efSShaohua Li sizeof(struct r5l_payload_data_parity) + 913f6bed0efSShaohua Li sizeof(__le32) * parity_pages; 914f6bed0efSShaohua Li 9155036c390SChristoph Hellwig ret = r5l_get_meta(log, meta_size); 9165036c390SChristoph Hellwig if (ret) 9175036c390SChristoph Hellwig return ret; 9185036c390SChristoph Hellwig 919f6bed0efSShaohua Li io = log->current_io; 920f6bed0efSShaohua Li 9213bddb7f8SSong Liu if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state)) 9223bddb7f8SSong Liu io->has_flush = 1; 9233bddb7f8SSong Liu 924f6bed0efSShaohua Li for (i = 0; i < sh->disks; i++) { 9251e6d690bSSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || 9261e6d690bSSong Liu test_bit(R5_InJournal, &sh->dev[i].flags)) 927f6bed0efSShaohua Li continue; 928f6bed0efSShaohua Li if (i == sh->pd_idx || i == sh->qd_idx) 929f6bed0efSShaohua Li continue; 9303bddb7f8SSong Liu if (test_bit(R5_WantFUA, &sh->dev[i].flags) && 9313bddb7f8SSong Liu log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) { 9323bddb7f8SSong Liu io->has_fua = 1; 9333bddb7f8SSong Liu /* 9343bddb7f8SSong Liu * we need to flush journal to make sure recovery can 9353bddb7f8SSong Liu * reach the data with fua flag 9363bddb7f8SSong Liu */ 9373bddb7f8SSong Liu io->has_flush = 1; 9383bddb7f8SSong Liu } 939f6bed0efSShaohua Li r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, 940f6bed0efSShaohua Li raid5_compute_blocknr(sh, i, 0), 941f6bed0efSShaohua Li sh->dev[i].log_checksum, 0, false); 942f6bed0efSShaohua Li r5l_append_payload_page(log, sh->dev[i].page); 943f6bed0efSShaohua Li } 944f6bed0efSShaohua Li 9452ded3703SSong Liu if 
(parity_pages == 2) { 946f6bed0efSShaohua Li r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, 947f6bed0efSShaohua Li sh->sector, sh->dev[sh->pd_idx].log_checksum, 948f6bed0efSShaohua Li sh->dev[sh->qd_idx].log_checksum, true); 949f6bed0efSShaohua Li r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); 950f6bed0efSShaohua Li r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); 9512ded3703SSong Liu } else if (parity_pages == 1) { 952f6bed0efSShaohua Li r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, 953f6bed0efSShaohua Li sh->sector, sh->dev[sh->pd_idx].log_checksum, 954f6bed0efSShaohua Li 0, false); 955f6bed0efSShaohua Li r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); 9562ded3703SSong Liu } else /* Just writing data, not parity, in caching phase */ 9572ded3703SSong Liu BUG_ON(parity_pages != 0); 958f6bed0efSShaohua Li 959f6bed0efSShaohua Li list_add_tail(&sh->log_list, &io->stripe_list); 960f6bed0efSShaohua Li atomic_inc(&io->pending_stripe); 961f6bed0efSShaohua Li sh->log_io = io; 9625036c390SChristoph Hellwig 963a39f7afdSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) 964a39f7afdSSong Liu return 0; 965a39f7afdSSong Liu 966a39f7afdSSong Liu if (sh->log_start == MaxSector) { 967a39f7afdSSong Liu BUG_ON(!list_empty(&sh->r5c)); 968a39f7afdSSong Liu sh->log_start = io->log_start; 969a39f7afdSSong Liu spin_lock_irq(&log->stripe_in_journal_lock); 970a39f7afdSSong Liu list_add_tail(&sh->r5c, 971a39f7afdSSong Liu &log->stripe_in_journal_list); 972a39f7afdSSong Liu spin_unlock_irq(&log->stripe_in_journal_lock); 973a39f7afdSSong Liu atomic_inc(&log->stripe_in_journal_count); 974a39f7afdSSong Liu } 9755036c390SChristoph Hellwig return 0; 976f6bed0efSShaohua Li } 977f6bed0efSShaohua Li 978a39f7afdSSong Liu /* add stripe to no_space_stripes, and then wake up reclaim */ 979a39f7afdSSong Liu static inline void r5l_add_no_space_stripe(struct r5l_log *log, 980a39f7afdSSong Liu struct stripe_head *sh) 981a39f7afdSSong Liu { 982a39f7afdSSong Liu spin_lock(&log->no_space_stripes_lock); 983a39f7afdSSong Liu list_add_tail(&sh->log_list, &log->no_space_stripes); 984a39f7afdSSong Liu spin_unlock(&log->no_space_stripes_lock); 985a39f7afdSSong Liu } 986a39f7afdSSong Liu 987f6bed0efSShaohua Li /* 988f6bed0efSShaohua Li * running in raid5d, where reclaim could wait for raid5d too (when it flushes 989f6bed0efSShaohua Li * data from log to raid disks), so we shouldn't wait for reclaim here 990f6bed0efSShaohua Li */ 991f6bed0efSShaohua Li int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) 992f6bed0efSShaohua Li { 993a39f7afdSSong Liu struct r5conf *conf = sh->raid_conf; 994f6bed0efSShaohua Li int write_disks = 0; 995f6bed0efSShaohua Li int data_pages, parity_pages; 996f6bed0efSShaohua Li int reserve; 997f6bed0efSShaohua Li int i; 9985036c390SChristoph Hellwig int ret = 0; 999a39f7afdSSong Liu bool wake_reclaim = false; 1000f6bed0efSShaohua Li 1001f6bed0efSShaohua Li if (!log) 1002f6bed0efSShaohua Li return -EAGAIN; 1003f6bed0efSShaohua Li /* Don't support stripe batch */ 1004f6bed0efSShaohua Li if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || 1005f6bed0efSShaohua Li test_bit(STRIPE_SYNCING, &sh->state)) { 1006f6bed0efSShaohua Li /* the stripe is written to log, we start writing it to raid */ 1007f6bed0efSShaohua Li clear_bit(STRIPE_LOG_TRAPPED, &sh->state); 1008f6bed0efSShaohua Li return -EAGAIN; 1009f6bed0efSShaohua Li } 1010f6bed0efSShaohua Li 10112ded3703SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); 10122ded3703SSong Liu 
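/*
 * Pre-compute the per-page crc32c checksums before reserving log space;
 * pages that are not being written, or are already in the journal
 * (R5_InJournal), are skipped.
 */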
1013f6bed0efSShaohua Li for (i = 0; i < sh->disks; i++) { 1014f6bed0efSShaohua Li void *addr; 1015f6bed0efSShaohua Li 10161e6d690bSSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || 10171e6d690bSSong Liu test_bit(R5_InJournal, &sh->dev[i].flags)) 1018f6bed0efSShaohua Li continue; 10191e6d690bSSong Liu 1020f6bed0efSShaohua Li write_disks++; 1021f6bed0efSShaohua Li /* checksum is already calculated in last run */ 1022f6bed0efSShaohua Li if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) 1023f6bed0efSShaohua Li continue; 1024f6bed0efSShaohua Li addr = kmap_atomic(sh->dev[i].page); 10255cb2fbd6SShaohua Li sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, 1026f6bed0efSShaohua Li addr, PAGE_SIZE); 1027f6bed0efSShaohua Li kunmap_atomic(addr); 1028f6bed0efSShaohua Li } 1029f6bed0efSShaohua Li parity_pages = 1 + !!(sh->qd_idx >= 0); 1030f6bed0efSShaohua Li data_pages = write_disks - parity_pages; 1031f6bed0efSShaohua Li 1032f6bed0efSShaohua Li set_bit(STRIPE_LOG_TRAPPED, &sh->state); 1033253f9fd4SShaohua Li /* 1034253f9fd4SShaohua Li * The stripe must enter state machine again to finish the write, so 1035253f9fd4SShaohua Li * don't delay. 1036253f9fd4SShaohua Li */ 1037253f9fd4SShaohua Li clear_bit(STRIPE_DELAYED, &sh->state); 1038f6bed0efSShaohua Li atomic_inc(&sh->count); 1039f6bed0efSShaohua Li 1040f6bed0efSShaohua Li mutex_lock(&log->io_mutex); 1041f6bed0efSShaohua Li /* meta + data */ 1042f6bed0efSShaohua Li reserve = (1 + write_disks) << (PAGE_SHIFT - 9); 1043f6bed0efSShaohua Li 1044a39f7afdSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { 1045a39f7afdSSong Liu if (!r5l_has_free_space(log, reserve)) { 1046a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh); 1047a39f7afdSSong Liu wake_reclaim = true; 10485036c390SChristoph Hellwig } else { 10495036c390SChristoph Hellwig ret = r5l_log_stripe(log, sh, data_pages, parity_pages); 10505036c390SChristoph Hellwig if (ret) { 10515036c390SChristoph Hellwig spin_lock_irq(&log->io_list_lock); 1052a39f7afdSSong Liu list_add_tail(&sh->log_list, 1053a39f7afdSSong Liu &log->no_mem_stripes); 10545036c390SChristoph Hellwig spin_unlock_irq(&log->io_list_lock); 1055f6bed0efSShaohua Li } 10565036c390SChristoph Hellwig } 1057a39f7afdSSong Liu } else { /* R5C_JOURNAL_MODE_WRITE_BACK */ 1058a39f7afdSSong Liu /* 1059a39f7afdSSong Liu * log space critical, do not process stripes that are 1060a39f7afdSSong Liu * not in cache yet (sh->log_start == MaxSector). 
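 * (Flushing such a stripe would consume up to (raid_disks + 1) fresh pages
 * of journal space, see r5c_log_required_to_flush_cache() above, which is
 * exactly what we cannot afford while the log is critical.)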
1061a39f7afdSSong Liu */ 1062a39f7afdSSong Liu if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 1063a39f7afdSSong Liu sh->log_start == MaxSector) { 1064a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh); 1065a39f7afdSSong Liu wake_reclaim = true; 1066a39f7afdSSong Liu reserve = 0; 1067a39f7afdSSong Liu } else if (!r5l_has_free_space(log, reserve)) { 1068a39f7afdSSong Liu if (sh->log_start == log->last_checkpoint) 1069a39f7afdSSong Liu BUG(); 1070a39f7afdSSong Liu else 1071a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh); 1072a39f7afdSSong Liu } else { 1073a39f7afdSSong Liu ret = r5l_log_stripe(log, sh, data_pages, parity_pages); 1074a39f7afdSSong Liu if (ret) { 1075a39f7afdSSong Liu spin_lock_irq(&log->io_list_lock); 1076a39f7afdSSong Liu list_add_tail(&sh->log_list, 1077a39f7afdSSong Liu &log->no_mem_stripes); 1078a39f7afdSSong Liu spin_unlock_irq(&log->io_list_lock); 1079a39f7afdSSong Liu } 1080a39f7afdSSong Liu } 1081a39f7afdSSong Liu } 1082f6bed0efSShaohua Li 10835036c390SChristoph Hellwig mutex_unlock(&log->io_mutex); 1084a39f7afdSSong Liu if (wake_reclaim) 1085a39f7afdSSong Liu r5l_wake_reclaim(log, reserve); 1086f6bed0efSShaohua Li return 0; 1087f6bed0efSShaohua Li } 1088f6bed0efSShaohua Li 1089f6bed0efSShaohua Li void r5l_write_stripe_run(struct r5l_log *log) 1090f6bed0efSShaohua Li { 1091f6bed0efSShaohua Li if (!log) 1092f6bed0efSShaohua Li return; 1093f6bed0efSShaohua Li mutex_lock(&log->io_mutex); 1094f6bed0efSShaohua Li r5l_submit_current_io(log); 1095f6bed0efSShaohua Li mutex_unlock(&log->io_mutex); 1096f6bed0efSShaohua Li } 1097f6bed0efSShaohua Li 1098828cbe98SShaohua Li int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) 1099828cbe98SShaohua Li { 11003bddb7f8SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { 1101828cbe98SShaohua Li /* 11023bddb7f8SSong Liu * in write through (journal only) 11033bddb7f8SSong Liu * we flush log disk cache first, then write stripe data to 11043bddb7f8SSong Liu * raid disks. So if bio is finished, the log disk cache is 11053bddb7f8SSong Liu * flushed already. 
The recovery guarantees we can recover
11063bddb7f8SSong Liu * the bio from the log disk, so we don't need to flush again
1107828cbe98SShaohua Li */
1108828cbe98SShaohua Li if (bio->bi_iter.bi_size == 0) {
1109828cbe98SShaohua Li bio_endio(bio);
1110828cbe98SShaohua Li return 0;
1111828cbe98SShaohua Li }
11121eff9d32SJens Axboe bio->bi_opf &= ~REQ_PREFLUSH;
11133bddb7f8SSong Liu } else {
11143bddb7f8SSong Liu /* write back (with cache) */
11153bddb7f8SSong Liu if (bio->bi_iter.bi_size == 0) {
11163bddb7f8SSong Liu mutex_lock(&log->io_mutex);
11173bddb7f8SSong Liu r5l_get_meta(log, 0);
11183bddb7f8SSong Liu bio_list_add(&log->current_io->flush_barriers, bio);
11193bddb7f8SSong Liu log->current_io->has_flush = 1;
11203bddb7f8SSong Liu log->current_io->has_null_flush = 1;
11213bddb7f8SSong Liu atomic_inc(&log->current_io->pending_stripe);
11223bddb7f8SSong Liu r5l_submit_current_io(log);
11233bddb7f8SSong Liu mutex_unlock(&log->io_mutex);
11243bddb7f8SSong Liu return 0;
11253bddb7f8SSong Liu }
11263bddb7f8SSong Liu }
1127828cbe98SShaohua Li return -EAGAIN;
1128828cbe98SShaohua Li }
1129828cbe98SShaohua Li 
1130f6bed0efSShaohua Li /* This will run after log space is reclaimed */
1131f6bed0efSShaohua Li static void r5l_run_no_space_stripes(struct r5l_log *log)
1132f6bed0efSShaohua Li {
1133f6bed0efSShaohua Li struct stripe_head *sh;
1134f6bed0efSShaohua Li 
1135f6bed0efSShaohua Li spin_lock(&log->no_space_stripes_lock);
1136f6bed0efSShaohua Li while (!list_empty(&log->no_space_stripes)) {
1137f6bed0efSShaohua Li sh = list_first_entry(&log->no_space_stripes,
1138f6bed0efSShaohua Li struct stripe_head, log_list);
1139f6bed0efSShaohua Li list_del_init(&sh->log_list);
1140f6bed0efSShaohua Li set_bit(STRIPE_HANDLE, &sh->state);
1141f6bed0efSShaohua Li raid5_release_stripe(sh);
1142f6bed0efSShaohua Li }
1143f6bed0efSShaohua Li spin_unlock(&log->no_space_stripes_lock);
1144f6bed0efSShaohua Li }
1145f6bed0efSShaohua Li 
1146a39f7afdSSong Liu /*
1147a39f7afdSSong Liu * calculate new last_checkpoint
1148a39f7afdSSong Liu * for write through mode, returns log->next_checkpoint
1149a39f7afdSSong Liu * for write back, returns log_start of first sh in stripe_in_journal_list
1150a39f7afdSSong Liu */
1151a39f7afdSSong Liu static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1152a39f7afdSSong Liu {
1153a39f7afdSSong Liu struct stripe_head *sh;
1154a39f7afdSSong Liu struct r5l_log *log = conf->log;
1155a39f7afdSSong Liu sector_t new_cp;
1156a39f7afdSSong Liu unsigned long flags;
1157a39f7afdSSong Liu 
1158a39f7afdSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1159a39f7afdSSong Liu return log->next_checkpoint;
1160a39f7afdSSong Liu 
1161a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1162a39f7afdSSong Liu if (list_empty(&conf->log->stripe_in_journal_list)) {
1163a39f7afdSSong Liu /* all stripes flushed */
1164d3014e21SDan Carpenter spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1165a39f7afdSSong Liu return log->next_checkpoint;
1166a39f7afdSSong Liu }
1167a39f7afdSSong Liu sh = list_first_entry(&conf->log->stripe_in_journal_list,
1168a39f7afdSSong Liu struct stripe_head, r5c);
1169a39f7afdSSong Liu new_cp = sh->log_start;
1170a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1171a39f7afdSSong Liu return new_cp;
1172a39f7afdSSong Liu }
1173a39f7afdSSong Liu 
117417036461SChristoph Hellwig static sector_t r5l_reclaimable_space(struct r5l_log *log)
117517036461SChristoph Hellwig {
1176a39f7afdSSong Liu struct r5conf *conf = 
log->rdev->mddev->private; 1177a39f7afdSSong Liu 117817036461SChristoph Hellwig return r5l_ring_distance(log, log->last_checkpoint, 1179a39f7afdSSong Liu r5c_calculate_new_cp(conf)); 118017036461SChristoph Hellwig } 118117036461SChristoph Hellwig 11825036c390SChristoph Hellwig static void r5l_run_no_mem_stripe(struct r5l_log *log) 11835036c390SChristoph Hellwig { 11845036c390SChristoph Hellwig struct stripe_head *sh; 11855036c390SChristoph Hellwig 1186efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock); 11875036c390SChristoph Hellwig 11885036c390SChristoph Hellwig if (!list_empty(&log->no_mem_stripes)) { 11895036c390SChristoph Hellwig sh = list_first_entry(&log->no_mem_stripes, 11905036c390SChristoph Hellwig struct stripe_head, log_list); 11915036c390SChristoph Hellwig list_del_init(&sh->log_list); 11925036c390SChristoph Hellwig set_bit(STRIPE_HANDLE, &sh->state); 11935036c390SChristoph Hellwig raid5_release_stripe(sh); 11945036c390SChristoph Hellwig } 11955036c390SChristoph Hellwig } 11965036c390SChristoph Hellwig 119704732f74SChristoph Hellwig static bool r5l_complete_finished_ios(struct r5l_log *log) 119817036461SChristoph Hellwig { 119917036461SChristoph Hellwig struct r5l_io_unit *io, *next; 120017036461SChristoph Hellwig bool found = false; 120117036461SChristoph Hellwig 1202efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock); 120317036461SChristoph Hellwig 120404732f74SChristoph Hellwig list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { 120517036461SChristoph Hellwig /* don't change list order */ 120617036461SChristoph Hellwig if (io->state < IO_UNIT_STRIPE_END) 120717036461SChristoph Hellwig break; 120817036461SChristoph Hellwig 120917036461SChristoph Hellwig log->next_checkpoint = io->log_start; 121017036461SChristoph Hellwig 121117036461SChristoph Hellwig list_del(&io->log_sibling); 1212afeee514SKent Overstreet mempool_free(io, &log->io_pool); 12135036c390SChristoph Hellwig r5l_run_no_mem_stripe(log); 121417036461SChristoph Hellwig 121517036461SChristoph Hellwig found = true; 121617036461SChristoph Hellwig } 121717036461SChristoph Hellwig 121817036461SChristoph Hellwig return found; 121917036461SChristoph Hellwig } 122017036461SChristoph Hellwig 1221509ffec7SChristoph Hellwig static void __r5l_stripe_write_finished(struct r5l_io_unit *io) 1222509ffec7SChristoph Hellwig { 1223509ffec7SChristoph Hellwig struct r5l_log *log = io->log; 1224a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private; 1225509ffec7SChristoph Hellwig unsigned long flags; 1226509ffec7SChristoph Hellwig 1227509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags); 1228509ffec7SChristoph Hellwig __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); 122917036461SChristoph Hellwig 123004732f74SChristoph Hellwig if (!r5l_complete_finished_ios(log)) { 123185f2f9a4SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags); 123285f2f9a4SShaohua Li return; 123385f2f9a4SShaohua Li } 1234509ffec7SChristoph Hellwig 1235a39f7afdSSong Liu if (r5l_reclaimable_space(log) > log->max_free_space || 1236a39f7afdSSong Liu test_bit(R5C_LOG_TIGHT, &conf->cache_state)) 1237509ffec7SChristoph Hellwig r5l_wake_reclaim(log, 0); 1238509ffec7SChristoph Hellwig 1239509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags); 1240509ffec7SChristoph Hellwig wake_up(&log->iounit_wait); 1241509ffec7SChristoph Hellwig } 1242509ffec7SChristoph Hellwig 12430576b1c6SShaohua Li void r5l_stripe_write_finished(struct stripe_head *sh) 12440576b1c6SShaohua Li { 
12450576b1c6SShaohua Li struct r5l_io_unit *io;
12460576b1c6SShaohua Li
12470576b1c6SShaohua Li io = sh->log_io;
12480576b1c6SShaohua Li sh->log_io = NULL;
12490576b1c6SShaohua Li
1250509ffec7SChristoph Hellwig if (io && atomic_dec_and_test(&io->pending_stripe))
1251509ffec7SChristoph Hellwig __r5l_stripe_write_finished(io);
12520576b1c6SShaohua Li }
12530576b1c6SShaohua Li
1254a8c34f91SShaohua Li static void r5l_log_flush_endio(struct bio *bio)
1255a8c34f91SShaohua Li {
1256a8c34f91SShaohua Li struct r5l_log *log = container_of(bio, struct r5l_log,
1257a8c34f91SShaohua Li flush_bio);
1258a8c34f91SShaohua Li unsigned long flags;
1259a8c34f91SShaohua Li struct r5l_io_unit *io;
1260a8c34f91SShaohua Li
12614e4cbee9SChristoph Hellwig if (bio->bi_status)
12626e74a9cfSShaohua Li md_error(log->rdev->mddev, log->rdev);
12636e74a9cfSShaohua Li
1264a8c34f91SShaohua Li spin_lock_irqsave(&log->io_list_lock, flags);
1265d8858f43SChristoph Hellwig list_for_each_entry(io, &log->flushing_ios, log_sibling)
1266d8858f43SChristoph Hellwig r5l_io_run_stripes(io);
126704732f74SChristoph Hellwig list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1268a8c34f91SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags);
1269*0dd00cbaSChristoph Hellwig
1270*0dd00cbaSChristoph Hellwig bio_uninit(bio);
1271a8c34f91SShaohua Li }
1272a8c34f91SShaohua Li
12730576b1c6SShaohua Li /*
12740576b1c6SShaohua Li * Start dispatching IO to the raid disks.
12750576b1c6SShaohua Li * The log consists of io_units, each with a meta block. There is one situation
12760576b1c6SShaohua Li * we want to avoid: a broken meta block in the middle of the log prevents
12770576b1c6SShaohua Li * recovery from finding the meta blocks after it. So if an operation requires
12780576b1c6SShaohua Li * its meta to be persistent in the log, the meta blocks before it must be persistent too. A case is:
12790576b1c6SShaohua Li *
12800576b1c6SShaohua Li * stripe data/parity is in the log, and we start writing the stripe to the raid disks.
12810576b1c6SShaohua Li * The stripe data/parity must be persistent in the log before we write to the raid disks.
12820576b1c6SShaohua Li *
12830576b1c6SShaohua Li * The solution is that we strictly maintain io_unit list order. In this case, we
12840576b1c6SShaohua Li * only write stripes of an io_unit to the raid disks until that io_unit is the first
12850576b1c6SShaohua Li * one whose data/parity is in the log.
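 *
 * For example: if io_units A, B and C were submitted in that order, and A and
 * C are already persistent in the log but B is not, we may write A's stripes
 * to the raid disks, but must not write C's stripes until B is persistent as
 * well.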
12860576b1c6SShaohua Li */
12870576b1c6SShaohua Li void r5l_flush_stripe_to_raid(struct r5l_log *log)
12880576b1c6SShaohua Li {
1289a8c34f91SShaohua Li bool do_flush;
129056fef7c6SChristoph Hellwig
129156fef7c6SChristoph Hellwig if (!log || !log->need_cache_flush)
12920576b1c6SShaohua Li return;
12930576b1c6SShaohua Li
1294a8c34f91SShaohua Li spin_lock_irq(&log->io_list_lock);
1295a8c34f91SShaohua Li /* flush bio is running */
1296a8c34f91SShaohua Li if (!list_empty(&log->flushing_ios)) {
1297a8c34f91SShaohua Li spin_unlock_irq(&log->io_list_lock);
12980576b1c6SShaohua Li return;
12990576b1c6SShaohua Li }
1300a8c34f91SShaohua Li list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1301a8c34f91SShaohua Li do_flush = !list_empty(&log->flushing_ios);
13020576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock);
1303a8c34f91SShaohua Li
1304a8c34f91SShaohua Li if (!do_flush)
1305a8c34f91SShaohua Li return;
1306*0dd00cbaSChristoph Hellwig bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1307a7c50c94SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH);
1308a8c34f91SShaohua Li log->flush_bio.bi_end_io = r5l_log_flush_endio;
13094e49ea4aSMike Christie submit_bio(&log->flush_bio);
13100576b1c6SShaohua Li }
13110576b1c6SShaohua Li
13120576b1c6SShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp);
13134b482044SShaohua Li static void r5l_write_super_and_discard_space(struct r5l_log *log,
13144b482044SShaohua Li sector_t end)
13154b482044SShaohua Li {
13164b482044SShaohua Li struct block_device *bdev = log->rdev->bdev;
13174b482044SShaohua Li struct mddev *mddev;
13184b482044SShaohua Li
13194b482044SShaohua Li r5l_write_super(log, end);
13204b482044SShaohua Li
13214b482044SShaohua Li if (!blk_queue_discard(bdev_get_queue(bdev)))
13224b482044SShaohua Li return;
13234b482044SShaohua Li
13244b482044SShaohua Li mddev = log->rdev->mddev;
13254b482044SShaohua Li /*
13268e018c21SShaohua Li * Discard could zero data, so before discarding we must make sure the
13278e018c21SShaohua Li * superblock is updated to the new log tail. Updating the superblock (either
13288e018c21SShaohua Li * by calling md_update_sb() directly or depending on the md thread) must hold
13298e018c21SShaohua Li * the reconfig mutex. On the other hand, raid5_quiesce is called with the
13308e018c21SShaohua Li * reconfig_mutex held. The first step of raid5_quiesce() is waiting
13318e018c21SShaohua Li * for all IO to finish, hence waiting for the reclaim thread, while the reclaim
13328e018c21SShaohua Li * thread is calling this function and waiting for the reconfig mutex. So
13338e018c21SShaohua Li * there is a deadlock. We work around this issue with a trylock.
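 * (Note: the MD_SB_CHANGE_DEVS/MD_SB_CHANGE_PENDING bits are set in sb_flags
 * before the trylock below, so even when we bail out the md thread will still
 * write the superblock eventually; only the discard is skipped in that case.)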
13348e018c21SShaohua Li * FIXME: we could miss discard if we can't take the reconfig mutex
13354b482044SShaohua Li */
13362953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0,
13372953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
13388e018c21SShaohua Li if (!mddev_trylock(mddev))
13398e018c21SShaohua Li return;
13404b482044SShaohua Li md_update_sb(mddev, 1);
13418e018c21SShaohua Li mddev_unlock(mddev);
13424b482044SShaohua Li
13436e74a9cfSShaohua Li /* discard IO error really doesn't matter, ignore it */
13444b482044SShaohua Li if (log->last_checkpoint < end) {
13454b482044SShaohua Li blkdev_issue_discard(bdev,
13464b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
13474b482044SShaohua Li end - log->last_checkpoint, GFP_NOIO, 0);
13484b482044SShaohua Li } else {
13494b482044SShaohua Li blkdev_issue_discard(bdev,
13504b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
13514b482044SShaohua Li log->device_size - log->last_checkpoint,
13524b482044SShaohua Li GFP_NOIO, 0);
13534b482044SShaohua Li blkdev_issue_discard(bdev, log->rdev->data_offset, end,
13544b482044SShaohua Li GFP_NOIO, 0);
13554b482044SShaohua Li }
13564b482044SShaohua Li }
13574b482044SShaohua Li
1358a39f7afdSSong Liu /*
1359a39f7afdSSong Liu * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1360a39f7afdSSong Liu * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1361a39f7afdSSong Liu *
1362a39f7afdSSong Liu * must hold conf->device_lock
1363a39f7afdSSong Liu */
1364a39f7afdSSong Liu static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1365a39f7afdSSong Liu {
1366a39f7afdSSong Liu BUG_ON(list_empty(&sh->lru));
1367a39f7afdSSong Liu BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1368a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1369a39f7afdSSong Liu
1370a39f7afdSSong Liu /*
1371a39f7afdSSong Liu * The stripe is not ON_RELEASE_LIST, so it is safe to call
1372a39f7afdSSong Liu * raid5_release_stripe() while holding conf->device_lock
1373a39f7afdSSong Liu */
1374a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1375efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
1376a39f7afdSSong Liu
1377a39f7afdSSong Liu list_del_init(&sh->lru);
1378a39f7afdSSong Liu atomic_inc(&sh->count);
1379a39f7afdSSong Liu
1380a39f7afdSSong Liu set_bit(STRIPE_HANDLE, &sh->state);
1381a39f7afdSSong Liu atomic_inc(&conf->active_stripes);
1382a39f7afdSSong Liu r5c_make_stripe_write_out(sh);
1383a39f7afdSSong Liu
1384e33fbb9cSShaohua Li if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1385e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_partial_stripes);
1386e33fbb9cSShaohua Li else
1387e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_full_stripes);
1388a39f7afdSSong Liu raid5_release_stripe(sh);
1389a39f7afdSSong Liu }
1390a39f7afdSSong Liu
1391a39f7afdSSong Liu /*
1392a39f7afdSSong Liu * if num == 0, flush all full stripes
1393a39f7afdSSong Liu * if num > 0, flush all full stripes. If fewer than num full stripes are
1394a39f7afdSSong Liu * flushed, flush some partial stripes until a total of num stripes are
1395a39f7afdSSong Liu * flushed, or there are no more cached stripes.
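 *
 * For example: with num == 8 and 5 cached full stripes, all 5 full stripes
 * are flushed first, then up to 3 partial stripes.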
1396a39f7afdSSong Liu */
1397a39f7afdSSong Liu void r5c_flush_cache(struct r5conf *conf, int num)
1398a39f7afdSSong Liu {
1399a39f7afdSSong Liu int count;
1400a39f7afdSSong Liu struct stripe_head *sh, *next;
1401a39f7afdSSong Liu
1402efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
1403a39f7afdSSong Liu if (!conf->log)
1404a39f7afdSSong Liu return;
1405a39f7afdSSong Liu
1406a39f7afdSSong Liu count = 0;
1407a39f7afdSSong Liu list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1408a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1409a39f7afdSSong Liu count++;
1410a39f7afdSSong Liu }
1411a39f7afdSSong Liu
1412a39f7afdSSong Liu if (count >= num)
1413a39f7afdSSong Liu return;
1414a39f7afdSSong Liu list_for_each_entry_safe(sh, next,
1415a39f7afdSSong Liu &conf->r5c_partial_stripe_list, lru) {
1416a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1417a39f7afdSSong Liu if (++count >= num)
1418a39f7afdSSong Liu break;
1419a39f7afdSSong Liu }
1420a39f7afdSSong Liu }
1421a39f7afdSSong Liu
1422a39f7afdSSong Liu static void r5c_do_reclaim(struct r5conf *conf)
1423a39f7afdSSong Liu {
1424a39f7afdSSong Liu struct r5l_log *log = conf->log;
1425a39f7afdSSong Liu struct stripe_head *sh;
1426a39f7afdSSong Liu int count = 0;
1427a39f7afdSSong Liu unsigned long flags;
1428a39f7afdSSong Liu int total_cached;
1429a39f7afdSSong Liu int stripes_to_flush;
1430e33fbb9cSShaohua Li int flushing_partial, flushing_full;
1431a39f7afdSSong Liu
1432a39f7afdSSong Liu if (!r5c_is_writeback(log))
1433a39f7afdSSong Liu return;
1434a39f7afdSSong Liu
1435e33fbb9cSShaohua Li flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1436e33fbb9cSShaohua Li flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1437a39f7afdSSong Liu total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1438e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) -
1439e33fbb9cSShaohua Li flushing_full - flushing_partial;
1440a39f7afdSSong Liu
1441a39f7afdSSong Liu if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1442a39f7afdSSong Liu atomic_read(&conf->empty_inactive_list_nr) > 0)
1443a39f7afdSSong Liu /*
1444a39f7afdSSong Liu * if stripe cache pressure is high, flush all full stripes and
1445a39f7afdSSong Liu * some partial stripes
1446a39f7afdSSong Liu */
1447a39f7afdSSong Liu stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1448a39f7afdSSong Liu else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1449e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
145084890c03SShaohua Li R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1451a39f7afdSSong Liu /*
1452a39f7afdSSong Liu * if stripe cache pressure is moderate, or there are many full
1453a39f7afdSSong Liu * stripes, flush all full stripes
1454a39f7afdSSong Liu */
1455a39f7afdSSong Liu stripes_to_flush = 0;
1456a39f7afdSSong Liu else
1457a39f7afdSSong Liu /* no need to flush */
1458a39f7afdSSong Liu stripes_to_flush = -1;
1459a39f7afdSSong Liu
1460a39f7afdSSong Liu if (stripes_to_flush >= 0) {
1461a39f7afdSSong Liu spin_lock_irqsave(&conf->device_lock, flags);
1462a39f7afdSSong Liu r5c_flush_cache(conf, stripes_to_flush);
1463a39f7afdSSong Liu spin_unlock_irqrestore(&conf->device_lock, flags);
1464a39f7afdSSong Liu }
1465a39f7afdSSong Liu
1466a39f7afdSSong Liu /* if log space is tight, flush stripes on stripe_in_journal_list */
1467a39f7afdSSong Liu if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1468a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1469a39f7afdSSong Liu spin_lock(&conf->device_lock);
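/*
 * Note on lock nesting: stripe_in_journal_lock is taken first (irq-saving),
 * then device_lock; r5c_flush_stripe() called in the loop below asserts
 * device_lock via lockdep_assert_held().
 */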
1470a39f7afdSSong Liu list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { 1471a39f7afdSSong Liu /* 1472a39f7afdSSong Liu * stripes on stripe_in_journal_list could be in any 1473a39f7afdSSong Liu * state of the stripe_cache state machine. In this 1474a39f7afdSSong Liu * case, we only want to flush stripe on 1475a39f7afdSSong Liu * r5c_cached_full/partial_stripes. The following 1476a39f7afdSSong Liu * condition makes sure the stripe is on one of the 1477a39f7afdSSong Liu * two lists. 1478a39f7afdSSong Liu */ 1479a39f7afdSSong Liu if (!list_empty(&sh->lru) && 1480a39f7afdSSong Liu !test_bit(STRIPE_HANDLE, &sh->state) && 1481a39f7afdSSong Liu atomic_read(&sh->count) == 0) { 1482a39f7afdSSong Liu r5c_flush_stripe(conf, sh); 1483a39f7afdSSong Liu if (count++ >= R5C_RECLAIM_STRIPE_GROUP) 1484a39f7afdSSong Liu break; 1485a39f7afdSSong Liu } 1486e8fd52eeSShaohua Li } 1487a39f7afdSSong Liu spin_unlock(&conf->device_lock); 1488a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); 1489a39f7afdSSong Liu } 1490f687a33eSSong Liu 1491f687a33eSSong Liu if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) 1492f687a33eSSong Liu r5l_run_no_space_stripes(log); 1493f687a33eSSong Liu 1494a39f7afdSSong Liu md_wakeup_thread(conf->mddev->thread); 1495a39f7afdSSong Liu } 14964b482044SShaohua Li 14970576b1c6SShaohua Li static void r5l_do_reclaim(struct r5l_log *log) 14980576b1c6SShaohua Li { 1499a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private; 15000576b1c6SShaohua Li sector_t reclaim_target = xchg(&log->reclaim_target, 0); 150117036461SChristoph Hellwig sector_t reclaimable; 150217036461SChristoph Hellwig sector_t next_checkpoint; 1503a39f7afdSSong Liu bool write_super; 15040576b1c6SShaohua Li 15050576b1c6SShaohua Li spin_lock_irq(&log->io_list_lock); 1506a39f7afdSSong Liu write_super = r5l_reclaimable_space(log) > log->max_free_space || 1507a39f7afdSSong Liu reclaim_target != 0 || !list_empty(&log->no_space_stripes); 15080576b1c6SShaohua Li /* 15090576b1c6SShaohua Li * move proper io_unit to reclaim list. We should not change the order. 15100576b1c6SShaohua Li * reclaimable/unreclaimable io_unit can be mixed in the list, we 15110576b1c6SShaohua Li * shouldn't reuse space of an unreclaimable io_unit 15120576b1c6SShaohua Li */ 15130576b1c6SShaohua Li while (1) { 151417036461SChristoph Hellwig reclaimable = r5l_reclaimable_space(log); 151517036461SChristoph Hellwig if (reclaimable >= reclaim_target || 15160576b1c6SShaohua Li (list_empty(&log->running_ios) && 15170576b1c6SShaohua Li list_empty(&log->io_end_ios) && 1518a8c34f91SShaohua Li list_empty(&log->flushing_ios) && 151904732f74SChristoph Hellwig list_empty(&log->finished_ios))) 15200576b1c6SShaohua Li break; 15210576b1c6SShaohua Li 152217036461SChristoph Hellwig md_wakeup_thread(log->rdev->mddev->thread); 152317036461SChristoph Hellwig wait_event_lock_irq(log->iounit_wait, 152417036461SChristoph Hellwig r5l_reclaimable_space(log) > reclaimable, 152517036461SChristoph Hellwig log->io_list_lock); 15260576b1c6SShaohua Li } 152717036461SChristoph Hellwig 1528a39f7afdSSong Liu next_checkpoint = r5c_calculate_new_cp(conf); 15290576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock); 15300576b1c6SShaohua Li 1531a39f7afdSSong Liu if (reclaimable == 0 || !write_super) 15320576b1c6SShaohua Li return; 15330576b1c6SShaohua Li 15340576b1c6SShaohua Li /* 15350576b1c6SShaohua Li * write_super will flush cache of each raid disk. 
We must write super 15360576b1c6SShaohua Li * here, because the log area might be reused soon and we don't want to 15370576b1c6SShaohua Li * confuse recovery 15380576b1c6SShaohua Li */ 15394b482044SShaohua Li r5l_write_super_and_discard_space(log, next_checkpoint); 15400576b1c6SShaohua Li 15410576b1c6SShaohua Li mutex_lock(&log->io_mutex); 154217036461SChristoph Hellwig log->last_checkpoint = next_checkpoint; 1543a39f7afdSSong Liu r5c_update_log_state(log); 15440576b1c6SShaohua Li mutex_unlock(&log->io_mutex); 15450576b1c6SShaohua Li 154617036461SChristoph Hellwig r5l_run_no_space_stripes(log); 15470576b1c6SShaohua Li } 15480576b1c6SShaohua Li 15490576b1c6SShaohua Li static void r5l_reclaim_thread(struct md_thread *thread) 15500576b1c6SShaohua Li { 15510576b1c6SShaohua Li struct mddev *mddev = thread->mddev; 15520576b1c6SShaohua Li struct r5conf *conf = mddev->private; 15530576b1c6SShaohua Li struct r5l_log *log = conf->log; 15540576b1c6SShaohua Li 15550576b1c6SShaohua Li if (!log) 15560576b1c6SShaohua Li return; 1557a39f7afdSSong Liu r5c_do_reclaim(conf); 15580576b1c6SShaohua Li r5l_do_reclaim(log); 15590576b1c6SShaohua Li } 15600576b1c6SShaohua Li 1561a39f7afdSSong Liu void r5l_wake_reclaim(struct r5l_log *log, sector_t space) 1562f6bed0efSShaohua Li { 15630576b1c6SShaohua Li unsigned long target; 15640576b1c6SShaohua Li unsigned long new = (unsigned long)space; /* overflow in theory */ 15650576b1c6SShaohua Li 1566a39f7afdSSong Liu if (!log) 1567a39f7afdSSong Liu return; 15680576b1c6SShaohua Li do { 15690576b1c6SShaohua Li target = log->reclaim_target; 15700576b1c6SShaohua Li if (new < target) 15710576b1c6SShaohua Li return; 15720576b1c6SShaohua Li } while (cmpxchg(&log->reclaim_target, target, new) != target); 15730576b1c6SShaohua Li md_wakeup_thread(log->reclaim_thread); 1574f6bed0efSShaohua Li } 1575f6bed0efSShaohua Li 1576b03e0ccbSNeilBrown void r5l_quiesce(struct r5l_log *log, int quiesce) 1577e6c033f7SShaohua Li { 15784b482044SShaohua Li struct mddev *mddev; 1579b03e0ccbSNeilBrown 1580b03e0ccbSNeilBrown if (quiesce) { 15814b482044SShaohua Li /* make sure r5l_write_super_and_discard_space exits */ 15824b482044SShaohua Li mddev = log->rdev->mddev; 15834b482044SShaohua Li wake_up(&mddev->sb_wait); 1584ce1ccd07SShaohua Li kthread_park(log->reclaim_thread->tsk); 1585a39f7afdSSong Liu r5l_wake_reclaim(log, MaxSector); 1586e6c033f7SShaohua Li r5l_do_reclaim(log); 1587b03e0ccbSNeilBrown } else 1588b03e0ccbSNeilBrown kthread_unpark(log->reclaim_thread->tsk); 1589e6c033f7SShaohua Li } 1590e6c033f7SShaohua Li 15916e74a9cfSShaohua Li bool r5l_log_disk_error(struct r5conf *conf) 15926e74a9cfSShaohua Li { 1593f6b6ec5cSShaohua Li struct r5l_log *log; 1594f6b6ec5cSShaohua Li bool ret; 15957dde2ad3SShaohua Li /* don't allow write if journal disk is missing */ 1596f6b6ec5cSShaohua Li rcu_read_lock(); 1597f6b6ec5cSShaohua Li log = rcu_dereference(conf->log); 1598f6b6ec5cSShaohua Li 1599f6b6ec5cSShaohua Li if (!log) 1600f6b6ec5cSShaohua Li ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 1601f6b6ec5cSShaohua Li else 1602f6b6ec5cSShaohua Li ret = test_bit(Faulty, &log->rdev->flags); 1603f6b6ec5cSShaohua Li rcu_read_unlock(); 1604f6b6ec5cSShaohua Li return ret; 16056e74a9cfSShaohua Li } 16066e74a9cfSShaohua Li 1607effe6ee7SSong Liu #define R5L_RECOVERY_PAGE_POOL_SIZE 256 1608effe6ee7SSong Liu 1609355810d1SShaohua Li struct r5l_recovery_ctx { 1610355810d1SShaohua Li struct page *meta_page; /* current meta */ 1611355810d1SShaohua Li sector_t meta_total_blocks; /* total size of current meta and data */ 
1612355810d1SShaohua Li sector_t pos; /* recovery position */ 1613355810d1SShaohua Li u64 seq; /* recovery position seq */ 1614b4c625c6SSong Liu int data_parity_stripes; /* number of data_parity stripes */ 1615b4c625c6SSong Liu int data_only_stripes; /* number of data_only stripes */ 1616b4c625c6SSong Liu struct list_head cached_list; 1617effe6ee7SSong Liu 1618effe6ee7SSong Liu /* 1619effe6ee7SSong Liu * read ahead page pool (ra_pool) 1620effe6ee7SSong Liu * in recovery, log is read sequentially. It is not efficient to 1621effe6ee7SSong Liu * read every page with sync_page_io(). The read ahead page pool 1622effe6ee7SSong Liu * reads multiple pages with one IO, so further log read can 1623effe6ee7SSong Liu * just copy data from the pool. 1624effe6ee7SSong Liu */ 1625effe6ee7SSong Liu struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE]; 1626effe6ee7SSong Liu sector_t pool_offset; /* offset of first page in the pool */ 1627effe6ee7SSong Liu int total_pages; /* total allocated pages */ 1628effe6ee7SSong Liu int valid_pages; /* pages with valid data */ 1629effe6ee7SSong Liu struct bio *ra_bio; /* bio to do the read ahead */ 1630355810d1SShaohua Li }; 1631355810d1SShaohua Li 1632effe6ee7SSong Liu static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, 1633effe6ee7SSong Liu struct r5l_recovery_ctx *ctx) 1634effe6ee7SSong Liu { 1635effe6ee7SSong Liu struct page *page; 1636effe6ee7SSong Liu 1637609be106SChristoph Hellwig ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL, 1638609be106SChristoph Hellwig &log->bs); 1639effe6ee7SSong Liu if (!ctx->ra_bio) 1640effe6ee7SSong Liu return -ENOMEM; 1641effe6ee7SSong Liu 1642effe6ee7SSong Liu ctx->valid_pages = 0; 1643effe6ee7SSong Liu ctx->total_pages = 0; 1644effe6ee7SSong Liu while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) { 1645effe6ee7SSong Liu page = alloc_page(GFP_KERNEL); 1646effe6ee7SSong Liu 1647effe6ee7SSong Liu if (!page) 1648effe6ee7SSong Liu break; 1649effe6ee7SSong Liu ctx->ra_pool[ctx->total_pages] = page; 1650effe6ee7SSong Liu ctx->total_pages += 1; 1651effe6ee7SSong Liu } 1652effe6ee7SSong Liu 1653effe6ee7SSong Liu if (ctx->total_pages == 0) { 1654effe6ee7SSong Liu bio_put(ctx->ra_bio); 1655effe6ee7SSong Liu return -ENOMEM; 1656effe6ee7SSong Liu } 1657effe6ee7SSong Liu 1658effe6ee7SSong Liu ctx->pool_offset = 0; 1659effe6ee7SSong Liu return 0; 1660effe6ee7SSong Liu } 1661effe6ee7SSong Liu 1662effe6ee7SSong Liu static void r5l_recovery_free_ra_pool(struct r5l_log *log, 1663effe6ee7SSong Liu struct r5l_recovery_ctx *ctx) 1664effe6ee7SSong Liu { 1665effe6ee7SSong Liu int i; 1666effe6ee7SSong Liu 1667effe6ee7SSong Liu for (i = 0; i < ctx->total_pages; ++i) 1668effe6ee7SSong Liu put_page(ctx->ra_pool[i]); 1669effe6ee7SSong Liu bio_put(ctx->ra_bio); 1670effe6ee7SSong Liu } 1671effe6ee7SSong Liu 1672effe6ee7SSong Liu /* 1673effe6ee7SSong Liu * fetch ctx->valid_pages pages from offset 1674effe6ee7SSong Liu * In normal cases, ctx->valid_pages == ctx->total_pages after the call. 
1675effe6ee7SSong Liu * However, if the offset is close to the end of the journal device, 1676effe6ee7SSong Liu * ctx->valid_pages could be smaller than ctx->total_pages 1677effe6ee7SSong Liu */ 1678effe6ee7SSong Liu static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, 1679effe6ee7SSong Liu struct r5l_recovery_ctx *ctx, 1680effe6ee7SSong Liu sector_t offset) 1681effe6ee7SSong Liu { 1682a7c50c94SChristoph Hellwig bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ); 1683effe6ee7SSong Liu ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; 1684effe6ee7SSong Liu 1685effe6ee7SSong Liu ctx->valid_pages = 0; 1686effe6ee7SSong Liu ctx->pool_offset = offset; 1687effe6ee7SSong Liu 1688effe6ee7SSong Liu while (ctx->valid_pages < ctx->total_pages) { 1689effe6ee7SSong Liu bio_add_page(ctx->ra_bio, 1690effe6ee7SSong Liu ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0); 1691effe6ee7SSong Liu ctx->valid_pages += 1; 1692effe6ee7SSong Liu 1693effe6ee7SSong Liu offset = r5l_ring_add(log, offset, BLOCK_SECTORS); 1694effe6ee7SSong Liu 1695effe6ee7SSong Liu if (offset == 0) /* reached end of the device */ 1696effe6ee7SSong Liu break; 1697effe6ee7SSong Liu } 1698effe6ee7SSong Liu 1699effe6ee7SSong Liu return submit_bio_wait(ctx->ra_bio); 1700effe6ee7SSong Liu } 1701effe6ee7SSong Liu 1702effe6ee7SSong Liu /* 1703effe6ee7SSong Liu * try read a page from the read ahead page pool, if the page is not in the 1704effe6ee7SSong Liu * pool, call r5l_recovery_fetch_ra_pool 1705effe6ee7SSong Liu */ 1706effe6ee7SSong Liu static int r5l_recovery_read_page(struct r5l_log *log, 1707effe6ee7SSong Liu struct r5l_recovery_ctx *ctx, 1708effe6ee7SSong Liu struct page *page, 1709effe6ee7SSong Liu sector_t offset) 1710effe6ee7SSong Liu { 1711effe6ee7SSong Liu int ret; 1712effe6ee7SSong Liu 1713effe6ee7SSong Liu if (offset < ctx->pool_offset || 1714effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) { 1715effe6ee7SSong Liu ret = r5l_recovery_fetch_ra_pool(log, ctx, offset); 1716effe6ee7SSong Liu if (ret) 1717effe6ee7SSong Liu return ret; 1718effe6ee7SSong Liu } 1719effe6ee7SSong Liu 1720effe6ee7SSong Liu BUG_ON(offset < ctx->pool_offset || 1721effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS); 1722effe6ee7SSong Liu 1723effe6ee7SSong Liu memcpy(page_address(page), 1724effe6ee7SSong Liu page_address(ctx->ra_pool[(offset - ctx->pool_offset) >> 1725effe6ee7SSong Liu BLOCK_SECTOR_SHIFT]), 1726effe6ee7SSong Liu PAGE_SIZE); 1727effe6ee7SSong Liu return 0; 1728effe6ee7SSong Liu } 1729effe6ee7SSong Liu 17309ed988f5SSong Liu static int r5l_recovery_read_meta_block(struct r5l_log *log, 1731355810d1SShaohua Li struct r5l_recovery_ctx *ctx) 1732355810d1SShaohua Li { 1733355810d1SShaohua Li struct page *page = ctx->meta_page; 1734355810d1SShaohua Li struct r5l_meta_block *mb; 1735355810d1SShaohua Li u32 crc, stored_crc; 1736effe6ee7SSong Liu int ret; 1737355810d1SShaohua Li 1738effe6ee7SSong Liu ret = r5l_recovery_read_page(log, ctx, page, ctx->pos); 1739effe6ee7SSong Liu if (ret != 0) 1740effe6ee7SSong Liu return ret; 1741355810d1SShaohua Li 1742355810d1SShaohua Li mb = page_address(page); 1743355810d1SShaohua Li stored_crc = le32_to_cpu(mb->checksum); 1744355810d1SShaohua Li mb->checksum = 0; 1745355810d1SShaohua Li 1746355810d1SShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || 1747355810d1SShaohua Li le64_to_cpu(mb->seq) != ctx->seq || 1748355810d1SShaohua Li mb->version != R5LOG_VERSION || 1749355810d1SShaohua Li le64_to_cpu(mb->position) != ctx->pos) 
1750355810d1SShaohua Li return -EINVAL;
1751355810d1SShaohua Li
17525cb2fbd6SShaohua Li crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1753355810d1SShaohua Li if (stored_crc != crc)
1754355810d1SShaohua Li return -EINVAL;
1755355810d1SShaohua Li
1756355810d1SShaohua Li if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1757355810d1SShaohua Li return -EINVAL;
1758355810d1SShaohua Li
1759355810d1SShaohua Li ctx->meta_total_blocks = BLOCK_SECTORS;
1760355810d1SShaohua Li
1761355810d1SShaohua Li return 0;
1762355810d1SShaohua Li }
1763355810d1SShaohua Li
17649ed988f5SSong Liu static void
17659ed988f5SSong Liu r5l_recovery_create_empty_meta_block(struct r5l_log *log,
17669ed988f5SSong Liu struct page *page,
17679ed988f5SSong Liu sector_t pos, u64 seq)
1768355810d1SShaohua Li {
1769355810d1SShaohua Li struct r5l_meta_block *mb;
1770355810d1SShaohua Li
1771355810d1SShaohua Li mb = page_address(page);
17729ed988f5SSong Liu clear_page(mb);
1773355810d1SShaohua Li mb->magic = cpu_to_le32(R5LOG_MAGIC);
1774355810d1SShaohua Li mb->version = R5LOG_VERSION;
1775355810d1SShaohua Li mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1776355810d1SShaohua Li mb->seq = cpu_to_le64(seq);
1777355810d1SShaohua Li mb->position = cpu_to_le64(pos);
1778355810d1SShaohua Li }
1779355810d1SShaohua Li
1780355810d1SShaohua Li static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1781355810d1SShaohua Li u64 seq)
1782355810d1SShaohua Li {
1783355810d1SShaohua Li struct page *page;
1784355810d1SShaohua Li struct r5l_meta_block *mb;
1785355810d1SShaohua Li
17869ed988f5SSong Liu page = alloc_page(GFP_KERNEL);
1787355810d1SShaohua Li if (!page)
1788355810d1SShaohua Li return -ENOMEM;
17899ed988f5SSong Liu r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1790355810d1SShaohua Li mb = page_address(page);
17915c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
17925c88f403SSong Liu mb, PAGE_SIZE));
1793796a5cf0SMike Christie if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
17945a8948f8SJan Kara REQ_SYNC | REQ_FUA, false)) {
1795355810d1SShaohua Li __free_page(page);
1796355810d1SShaohua Li return -EIO;
1797355810d1SShaohua Li }
1798355810d1SShaohua Li __free_page(page);
1799355810d1SShaohua Li return 0;
1800355810d1SShaohua Li }
1801355810d1SShaohua Li
1802b4c625c6SSong Liu /*
1803b4c625c6SSong Liu * r5l_recovery_load_data and r5l_recovery_load_parity use the flag R5_Wantwrite
1804b4c625c6SSong Liu * to mark valid (potentially not flushed) data in the journal.
1805b4c625c6SSong Liu *
1806b4c625c6SSong Liu * We already verified the checksum in r5l_recovery_verify_data_checksum_for_mb,
1807b4c625c6SSong Liu * so there should not be any mismatch here.
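 *
 * (The R5_Wantwrite bits set here are consumed later in recovery:
 * r5l_recovery_replay_one_stripe() writes such pages back to the raid disks,
 * while r5c_recovery_load_one_stripe() converts the flag to R5_InJournal for
 * data-only stripes.)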
1808b4c625c6SSong Liu */ 1809b4c625c6SSong Liu static void r5l_recovery_load_data(struct r5l_log *log, 1810b4c625c6SSong Liu struct stripe_head *sh, 1811b4c625c6SSong Liu struct r5l_recovery_ctx *ctx, 1812b4c625c6SSong Liu struct r5l_payload_data_parity *payload, 1813b4c625c6SSong Liu sector_t log_offset) 1814f6bed0efSShaohua Li { 1815b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 1816b4c625c6SSong Liu struct r5conf *conf = mddev->private; 1817b4c625c6SSong Liu int dd_idx; 1818355810d1SShaohua Li 1819b4c625c6SSong Liu raid5_compute_sector(conf, 1820b4c625c6SSong Liu le64_to_cpu(payload->location), 0, 1821b4c625c6SSong Liu &dd_idx, sh); 1822effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); 1823b4c625c6SSong Liu sh->dev[dd_idx].log_checksum = 1824b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]); 1825b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS; 1826b4c625c6SSong Liu 1827b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); 1828b4c625c6SSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state); 1829b4c625c6SSong Liu } 1830b4c625c6SSong Liu 1831b4c625c6SSong Liu static void r5l_recovery_load_parity(struct r5l_log *log, 1832b4c625c6SSong Liu struct stripe_head *sh, 1833b4c625c6SSong Liu struct r5l_recovery_ctx *ctx, 1834b4c625c6SSong Liu struct r5l_payload_data_parity *payload, 1835b4c625c6SSong Liu sector_t log_offset) 1836b4c625c6SSong Liu { 1837b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 1838b4c625c6SSong Liu struct r5conf *conf = mddev->private; 1839b4c625c6SSong Liu 1840b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; 1841effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); 1842b4c625c6SSong Liu sh->dev[sh->pd_idx].log_checksum = 1843b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]); 1844b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); 1845b4c625c6SSong Liu 1846b4c625c6SSong Liu if (sh->qd_idx >= 0) { 1847effe6ee7SSong Liu r5l_recovery_read_page( 1848effe6ee7SSong Liu log, ctx, sh->dev[sh->qd_idx].page, 1849effe6ee7SSong Liu r5l_ring_add(log, log_offset, BLOCK_SECTORS)); 1850b4c625c6SSong Liu sh->dev[sh->qd_idx].log_checksum = 1851b4c625c6SSong Liu le32_to_cpu(payload->checksum[1]); 1852b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); 1853b4c625c6SSong Liu } 1854b4c625c6SSong Liu clear_bit(STRIPE_R5C_CACHING, &sh->state); 1855b4c625c6SSong Liu } 1856b4c625c6SSong Liu 1857b4c625c6SSong Liu static void r5l_recovery_reset_stripe(struct stripe_head *sh) 1858b4c625c6SSong Liu { 1859b4c625c6SSong Liu int i; 1860b4c625c6SSong Liu 1861b4c625c6SSong Liu sh->state = 0; 1862b4c625c6SSong Liu sh->log_start = MaxSector; 1863b4c625c6SSong Liu for (i = sh->disks; i--; ) 1864b4c625c6SSong Liu sh->dev[i].flags = 0; 1865b4c625c6SSong Liu } 1866b4c625c6SSong Liu 1867b4c625c6SSong Liu static void 1868b4c625c6SSong Liu r5l_recovery_replay_one_stripe(struct r5conf *conf, 1869b4c625c6SSong Liu struct stripe_head *sh, 1870b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 1871b4c625c6SSong Liu { 1872b4c625c6SSong Liu struct md_rdev *rdev, *rrdev; 1873b4c625c6SSong Liu int disk_index; 1874b4c625c6SSong Liu int data_count = 0; 1875b4c625c6SSong Liu 1876b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) { 1877b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) 1878b4c625c6SSong Liu continue; 1879b4c625c6SSong Liu if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) 
1880b4c625c6SSong Liu continue;
1881b4c625c6SSong Liu data_count++;
1882b4c625c6SSong Liu }
1883b4c625c6SSong Liu
1884b4c625c6SSong Liu /*
1885b4c625c6SSong Liu * stripes that only have parity must have been flushed
1886b4c625c6SSong Liu * before the crash that we are now recovering from, so
1887b4c625c6SSong Liu * there is nothing more to recover.
1888b4c625c6SSong Liu */
1889b4c625c6SSong Liu if (data_count == 0)
1890b4c625c6SSong Liu goto out;
1891b4c625c6SSong Liu
1892b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1893b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1894b4c625c6SSong Liu continue;
1895b4c625c6SSong Liu
1896b4c625c6SSong Liu /* in case the device is broken */
1897b4c625c6SSong Liu rcu_read_lock();
1898b4c625c6SSong Liu rdev = rcu_dereference(conf->disks[disk_index].rdev);
1899b4c625c6SSong Liu if (rdev) {
1900b4c625c6SSong Liu atomic_inc(&rdev->nr_pending);
1901b4c625c6SSong Liu rcu_read_unlock();
1902b4c625c6SSong Liu sync_page_io(rdev, sh->sector, PAGE_SIZE,
1903b4c625c6SSong Liu sh->dev[disk_index].page, REQ_OP_WRITE, 0,
1904b4c625c6SSong Liu false);
1905b4c625c6SSong Liu rdev_dec_pending(rdev, rdev->mddev);
1906b4c625c6SSong Liu rcu_read_lock();
1907b4c625c6SSong Liu }
1908b4c625c6SSong Liu rrdev = rcu_dereference(conf->disks[disk_index].replacement);
1909b4c625c6SSong Liu if (rrdev) {
1910b4c625c6SSong Liu atomic_inc(&rrdev->nr_pending);
1911b4c625c6SSong Liu rcu_read_unlock();
1912b4c625c6SSong Liu sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1913b4c625c6SSong Liu sh->dev[disk_index].page, REQ_OP_WRITE, 0,
1914b4c625c6SSong Liu false);
1915b4c625c6SSong Liu rdev_dec_pending(rrdev, rrdev->mddev);
1916b4c625c6SSong Liu rcu_read_lock();
1917b4c625c6SSong Liu }
1918b4c625c6SSong Liu rcu_read_unlock();
1919b4c625c6SSong Liu }
1920b4c625c6SSong Liu ctx->data_parity_stripes++;
1921b4c625c6SSong Liu out:
1922b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1923b4c625c6SSong Liu }
1924b4c625c6SSong Liu
1925b4c625c6SSong Liu static struct stripe_head *
1926483cbbedSAlexei Naberezhnov r5c_recovery_alloc_stripe(
1927483cbbedSAlexei Naberezhnov struct r5conf *conf,
1928483cbbedSAlexei Naberezhnov sector_t stripe_sect,
1929483cbbedSAlexei Naberezhnov int noblock)
1930b4c625c6SSong Liu {
1931b4c625c6SSong Liu struct stripe_head *sh;
1932b4c625c6SSong Liu
1933483cbbedSAlexei Naberezhnov sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
1934b4c625c6SSong Liu if (!sh)
1935b4c625c6SSong Liu return NULL; /* no more stripe available */
1936b4c625c6SSong Liu
1937b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1938b4c625c6SSong Liu
1939b4c625c6SSong Liu return sh;
1940b4c625c6SSong Liu }
1941b4c625c6SSong Liu
1942b4c625c6SSong Liu static struct stripe_head *
1943b4c625c6SSong Liu r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1944b4c625c6SSong Liu {
1945b4c625c6SSong Liu struct stripe_head *sh;
1946b4c625c6SSong Liu
1947b4c625c6SSong Liu list_for_each_entry(sh, list, lru)
1948b4c625c6SSong Liu if (sh->sector == sect)
1949b4c625c6SSong Liu return sh;
1950b4c625c6SSong Liu return NULL;
1951b4c625c6SSong Liu }
1952b4c625c6SSong Liu
1953b4c625c6SSong Liu static void
1954b4c625c6SSong Liu r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1955b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1956b4c625c6SSong Liu {
1957b4c625c6SSong Liu struct stripe_head *sh, *next;
1958b4c625c6SSong Liu
1959b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1960b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1961b4c625c6SSong Liu list_del_init(&sh->lru);
1962b4c625c6SSong Liu raid5_release_stripe(sh);
1963b4c625c6SSong Liu }
1964b4c625c6SSong Liu }
1965b4c625c6SSong Liu
1966b4c625c6SSong Liu static void
1967b4c625c6SSong Liu r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1968b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1969b4c625c6SSong Liu {
1970b4c625c6SSong Liu struct stripe_head *sh, *next;
1971b4c625c6SSong Liu
1972b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1973b4c625c6SSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1974b4c625c6SSong Liu r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1975b4c625c6SSong Liu list_del_init(&sh->lru);
1976b4c625c6SSong Liu raid5_release_stripe(sh);
1977b4c625c6SSong Liu }
1978b4c625c6SSong Liu }
1979b4c625c6SSong Liu
1980b4c625c6SSong Liu /* if matches return 0; otherwise return -EINVAL */
1981b4c625c6SSong Liu static int
1982effe6ee7SSong Liu r5l_recovery_verify_data_checksum(struct r5l_log *log,
1983effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1984effe6ee7SSong Liu struct page *page,
1985b4c625c6SSong Liu sector_t log_offset, __le32 log_checksum)
1986b4c625c6SSong Liu {
1987b4c625c6SSong Liu void *addr;
1988b4c625c6SSong Liu u32 checksum;
1989b4c625c6SSong Liu
1990effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, page, log_offset);
1991b4c625c6SSong Liu addr = kmap_atomic(page);
1992b4c625c6SSong Liu checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1993b4c625c6SSong Liu kunmap_atomic(addr);
1994b4c625c6SSong Liu return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1995b4c625c6SSong Liu }
1996b4c625c6SSong Liu
1997b4c625c6SSong Liu /*
1998b4c625c6SSong Liu * before loading data to the stripe cache, we need to verify checksums for all
1999b4c625c6SSong Liu * data; if there is a mismatch for any data page, we drop all data in the meta block
2000b4c625c6SSong Liu */
2001b4c625c6SSong Liu static int
2002b4c625c6SSong Liu r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
2003b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2004b4c625c6SSong Liu {
2005b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2006b4c625c6SSong Liu struct r5conf *conf = mddev->private;
2007b4c625c6SSong Liu struct r5l_meta_block *mb = page_address(ctx->meta_page);
2008b4c625c6SSong Liu sector_t mb_offset = sizeof(struct r5l_meta_block);
2009b4c625c6SSong Liu sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2010b4c625c6SSong Liu struct page *page;
2011b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20122d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2013b4c625c6SSong Liu
2014b4c625c6SSong Liu page = alloc_page(GFP_KERNEL);
2015b4c625c6SSong Liu if (!page)
2016355810d1SShaohua Li return -ENOMEM;
2017355810d1SShaohua Li
2018b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2019b4c625c6SSong Liu payload = (void *)mb + mb_offset;
20202d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
2021b4c625c6SSong Liu
20221ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2023b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2024effe6ee7SSong Liu log, ctx, page, log_offset,
2025b4c625c6SSong Liu payload->checksum[0]) < 0)
2026b4c625c6SSong Liu goto mismatch;
20271ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2028b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2029effe6ee7SSong Liu log, ctx, page, log_offset,
2030b4c625c6SSong Liu payload->checksum[0]) < 0)
2031b4c625c6SSong Liu goto mismatch;
2032b4c625c6SSong Liu if (conf->max_degraded == 2 && /* q for RAID 6 */
2033b4c625c6SSong Liu r5l_recovery_verify_data_checksum(
2034effe6ee7SSong Liu log, ctx, page,
2035b4c625c6SSong Liu r5l_ring_add(log, log_offset,
2036b4c625c6SSong Liu BLOCK_SECTORS),
2037b4c625c6SSong Liu payload->checksum[1]) < 0)
2038b4c625c6SSong Liu goto mismatch;
20391ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20402d4f4687SSong Liu /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
20412d4f4687SSong Liu } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2042b4c625c6SSong Liu goto mismatch;
2043b4c625c6SSong Liu
20441ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20452d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
20462d4f4687SSong Liu le32_to_cpu(payload_flush->size);
20472d4f4687SSong Liu } else {
20482d4f4687SSong Liu /* DATA or PARITY payload */
2049b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset,
2050b4c625c6SSong Liu le32_to_cpu(payload->size));
2051b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) +
2052b4c625c6SSong Liu sizeof(__le32) *
2053b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2054b4c625c6SSong Liu }
2055b4c625c6SSong Liu
20562d4f4687SSong Liu }
20572d4f4687SSong Liu
2058b4c625c6SSong Liu put_page(page);
2059b4c625c6SSong Liu return 0;
2060b4c625c6SSong Liu
2061b4c625c6SSong Liu mismatch:
2062b4c625c6SSong Liu put_page(page);
2063b4c625c6SSong Liu return -EINVAL;
2064b4c625c6SSong Liu }
2065b4c625c6SSong Liu
2066b4c625c6SSong Liu /*
2067b4c625c6SSong Liu * Analyze all data/parity pages in one meta block
2068b4c625c6SSong Liu * Returns:
2069b4c625c6SSong Liu * 0 for success
2070b4c625c6SSong Liu * -EINVAL for unknown payload type
2071b4c625c6SSong Liu * -EAGAIN for checksum mismatch of a data page
2072b4c625c6SSong Liu * -ENOMEM when out of memory (alloc_page failed or no more stripes)
2073b4c625c6SSong Liu */
2074b4c625c6SSong Liu static int
2075b4c625c6SSong Liu r5c_recovery_analyze_meta_block(struct r5l_log *log,
2076b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
2077b4c625c6SSong Liu struct list_head *cached_stripe_list)
2078b4c625c6SSong Liu {
2079b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2080b4c625c6SSong Liu struct r5conf *conf = mddev->private;
2081b4c625c6SSong Liu struct r5l_meta_block *mb;
2082b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20832d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2084b4c625c6SSong Liu int mb_offset;
2085b4c625c6SSong Liu sector_t log_offset;
2086b4c625c6SSong Liu sector_t stripe_sect;
2087b4c625c6SSong Liu struct stripe_head *sh;
2088b4c625c6SSong Liu int ret;
2089b4c625c6SSong Liu
2090b4c625c6SSong Liu /*
2091b4c625c6SSong Liu * on a mismatch in data blocks, we will drop all data in this mb, but
2092b4c625c6SSong Liu * we will still read the next mb for other data with the FLUSH flag, as
2093b4c625c6SSong Liu * io_unit could finish out of order.
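 * (In other words, a corrupt data page invalidates only this meta block:
 * the caller sees the -EAGAIN returned below and keeps scanning from the
 * next meta block instead of aborting the whole recovery.)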
2094b4c625c6SSong Liu */
2095b4c625c6SSong Liu ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2096b4c625c6SSong Liu if (ret == -EINVAL)
2097b4c625c6SSong Liu return -EAGAIN;
2098b4c625c6SSong Liu else if (ret)
2099b4c625c6SSong Liu return ret; /* -ENOMEM due to alloc_page() failure */
2100b4c625c6SSong Liu
2101b4c625c6SSong Liu mb = page_address(ctx->meta_page);
2102b4c625c6SSong Liu mb_offset = sizeof(struct r5l_meta_block);
2103b4c625c6SSong Liu log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2104b4c625c6SSong Liu
2105b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2106b4c625c6SSong Liu int dd;
2107b4c625c6SSong Liu
2108b4c625c6SSong Liu payload = (void *)mb + mb_offset;
21092d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
21102d4f4687SSong Liu
21111ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
21122d4f4687SSong Liu int i, count;
21132d4f4687SSong Liu
21142d4f4687SSong Liu count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
21152d4f4687SSong Liu for (i = 0; i < count; ++i) {
21162d4f4687SSong Liu stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
21172d4f4687SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
21182d4f4687SSong Liu stripe_sect);
21192d4f4687SSong Liu if (sh) {
21202d4f4687SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
21212d4f4687SSong Liu r5l_recovery_reset_stripe(sh);
21222d4f4687SSong Liu list_del_init(&sh->lru);
21232d4f4687SSong Liu raid5_release_stripe(sh);
21242d4f4687SSong Liu }
21252d4f4687SSong Liu }
21262d4f4687SSong Liu
21272d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
21282d4f4687SSong Liu le32_to_cpu(payload_flush->size);
21292d4f4687SSong Liu continue;
21302d4f4687SSong Liu }
21312d4f4687SSong Liu
21322d4f4687SSong Liu /* DATA or PARITY payload */
21331ad45a9bSJason Yan stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2134b4c625c6SSong Liu raid5_compute_sector(
2135b4c625c6SSong Liu conf, le64_to_cpu(payload->location), 0, &dd,
2136b4c625c6SSong Liu NULL)
2137b4c625c6SSong Liu : le64_to_cpu(payload->location);
2138b4c625c6SSong Liu
2139b4c625c6SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2140b4c625c6SSong Liu stripe_sect);
2141b4c625c6SSong Liu
2142b4c625c6SSong Liu if (!sh) {
2143483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2144b4c625c6SSong Liu /*
2145b4c625c6SSong Liu * cannot get a stripe from raid5_get_active_stripe;
2146b4c625c6SSong Liu * try replaying some stripes
2147b4c625c6SSong Liu */
2148b4c625c6SSong Liu if (!sh) {
2149b4c625c6SSong Liu r5c_recovery_replay_stripes(
2150b4c625c6SSong Liu cached_stripe_list, ctx);
2151b4c625c6SSong Liu sh = r5c_recovery_alloc_stripe(
2152483cbbedSAlexei Naberezhnov conf, stripe_sect, 1);
2153b4c625c6SSong Liu }
2154b4c625c6SSong Liu if (!sh) {
2155483cbbedSAlexei Naberezhnov int new_size = conf->min_nr_stripes * 2;
2156b4c625c6SSong Liu pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2157b4c625c6SSong Liu mdname(mddev),
2158483cbbedSAlexei Naberezhnov new_size);
2159483cbbedSAlexei Naberezhnov ret = raid5_set_cache_size(mddev, new_size);
2160483cbbedSAlexei Naberezhnov if (conf->min_nr_stripes <= new_size / 2) {
2161483cbbedSAlexei Naberezhnov pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2162483cbbedSAlexei Naberezhnov mdname(mddev),
2163483cbbedSAlexei Naberezhnov ret,
2164483cbbedSAlexei Naberezhnov new_size,
2165483cbbedSAlexei Naberezhnov conf->min_nr_stripes,
2166483cbbedSAlexei Naberezhnov conf->max_nr_stripes);
2167483cbbedSAlexei Naberezhnov return -ENOMEM;
2168483cbbedSAlexei Naberezhnov }
2169483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(
2170483cbbedSAlexei Naberezhnov conf, stripe_sect, 0);
2171b4c625c6SSong Liu }
2172b4c625c6SSong Liu if (!sh) {
2173b4c625c6SSong Liu pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure.
Recovery failed.\n", 2174b4c625c6SSong Liu mdname(mddev)); 2175b4c625c6SSong Liu return -ENOMEM; 2176b4c625c6SSong Liu } 2177b4c625c6SSong Liu list_add_tail(&sh->lru, cached_stripe_list); 2178b4c625c6SSong Liu } 2179b4c625c6SSong Liu 21801ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { 2181f7b7bee7SZhengyuan Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 2182f7b7bee7SZhengyuan Liu test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { 2183b4c625c6SSong Liu r5l_recovery_replay_one_stripe(conf, sh, ctx); 2184b4c625c6SSong Liu list_move_tail(&sh->lru, cached_stripe_list); 2185b4c625c6SSong Liu } 2186b4c625c6SSong Liu r5l_recovery_load_data(log, sh, ctx, payload, 2187b4c625c6SSong Liu log_offset); 21881ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) 2189b4c625c6SSong Liu r5l_recovery_load_parity(log, sh, ctx, payload, 2190b4c625c6SSong Liu log_offset); 2191b4c625c6SSong Liu else 2192b4c625c6SSong Liu return -EINVAL; 2193b4c625c6SSong Liu 2194b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset, 2195b4c625c6SSong Liu le32_to_cpu(payload->size)); 2196b4c625c6SSong Liu 2197b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) + 2198b4c625c6SSong Liu sizeof(__le32) * 2199b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); 2200b4c625c6SSong Liu } 2201b4c625c6SSong Liu 2202b4c625c6SSong Liu return 0; 2203b4c625c6SSong Liu } 2204b4c625c6SSong Liu 2205b4c625c6SSong Liu /* 2206b4c625c6SSong Liu * Load the stripe into cache. The stripe will be written out later by 2207b4c625c6SSong Liu * the stripe cache state machine. 2208b4c625c6SSong Liu */ 2209b4c625c6SSong Liu static void r5c_recovery_load_one_stripe(struct r5l_log *log, 2210b4c625c6SSong Liu struct stripe_head *sh) 2211b4c625c6SSong Liu { 2212b4c625c6SSong Liu struct r5dev *dev; 2213b4c625c6SSong Liu int i; 2214b4c625c6SSong Liu 2215b4c625c6SSong Liu for (i = sh->disks; i--; ) { 2216b4c625c6SSong Liu dev = sh->dev + i; 2217b4c625c6SSong Liu if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) { 2218b4c625c6SSong Liu set_bit(R5_InJournal, &dev->flags); 2219b4c625c6SSong Liu set_bit(R5_UPTODATE, &dev->flags); 2220b4c625c6SSong Liu } 2221b4c625c6SSong Liu } 2222b4c625c6SSong Liu } 2223b4c625c6SSong Liu 2224b4c625c6SSong Liu /* 2225b4c625c6SSong Liu * Scan through the log for all to-be-flushed data 2226b4c625c6SSong Liu * 2227b4c625c6SSong Liu * For stripes with data and parity, namely Data-Parity stripe 2228b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 0), we simply replay all the writes. 2229b4c625c6SSong Liu * 2230b4c625c6SSong Liu * For stripes with only data, namely Data-Only stripe 2231b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine. 2232b4c625c6SSong Liu * 2233b4c625c6SSong Liu * For a stripe, if we see data after parity, we should discard all previous 2234b4c625c6SSong Liu * data and parity for this stripe, as these data are already flushed to 2235b4c625c6SSong Liu * the array. 2236b4c625c6SSong Liu * 2237b4c625c6SSong Liu * At the end of the scan, we return the new journal_tail, which points to 2238b4c625c6SSong Liu * first data-only stripe on the journal device, or next invalid meta block. 
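 *
 * For example: if the log holds mb0 (data+parity of stripe S0), mb1 (data
 * only of stripe S1) and then an invalid block, S0 is replayed to the raid
 * disks, S1 is loaded into the stripe cache, and the returned journal_tail
 * points at mb1.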
2239b4c625c6SSong Liu */
2240b4c625c6SSong Liu static int r5c_recovery_flush_log(struct r5l_log *log,
2241b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2242b4c625c6SSong Liu {
2243bc8f167fSJackieLiu struct stripe_head *sh;
2244b4c625c6SSong Liu int ret = 0;
2245b4c625c6SSong Liu
2246b4c625c6SSong Liu /* scan through the log */
2247b4c625c6SSong Liu while (1) {
2248b4c625c6SSong Liu if (r5l_recovery_read_meta_block(log, ctx))
2249b4c625c6SSong Liu break;
2250b4c625c6SSong Liu
2251b4c625c6SSong Liu ret = r5c_recovery_analyze_meta_block(log, ctx,
2252b4c625c6SSong Liu &ctx->cached_list);
2253b4c625c6SSong Liu /*
2254b4c625c6SSong Liu * -EAGAIN means a mismatch in a data block; in this case, we
2255b4c625c6SSong Liu * still try to scan the next meta block
2256b4c625c6SSong Liu */
2257b4c625c6SSong Liu if (ret && ret != -EAGAIN)
2258b4c625c6SSong Liu break; /* ret == -EINVAL or -ENOMEM */
2259b4c625c6SSong Liu ctx->seq++;
2260b4c625c6SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2261b4c625c6SSong Liu }
2262b4c625c6SSong Liu
2263b4c625c6SSong Liu if (ret == -ENOMEM) {
2264b4c625c6SSong Liu r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2265b4c625c6SSong Liu return ret;
2266b4c625c6SSong Liu }
2267b4c625c6SSong Liu
2268b4c625c6SSong Liu /* replay data-parity stripes */
2269b4c625c6SSong Liu r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2270b4c625c6SSong Liu
2271b4c625c6SSong Liu /* load data-only stripes to stripe cache */
2272bc8f167fSJackieLiu list_for_each_entry(sh, &ctx->cached_list, lru) {
2273b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2274b4c625c6SSong Liu r5c_recovery_load_one_stripe(log, sh);
2275b4c625c6SSong Liu ctx->data_only_stripes++;
2276b4c625c6SSong Liu }
2277b4c625c6SSong Liu
2278b4c625c6SSong Liu return 0;
2279b4c625c6SSong Liu }
2280355810d1SShaohua Li
2281355810d1SShaohua Li /*
2282355810d1SShaohua Li * We did a recovery. Now ctx.pos points to an invalid meta block. The new
2283355810d1SShaohua Li * log will start here, but we can't let the superblock point to the last valid
2284355810d1SShaohua Li * meta block. The log might look like:
2285355810d1SShaohua Li * | meta 1| meta 2| meta 3|
2286355810d1SShaohua Li * meta 1 is valid, meta 2 is invalid and meta 3 could be valid. If the
2287355810d1SShaohua Li * superblock points to meta 1, we write a new valid meta 2n. If a crash
2288355810d1SShaohua Li * happens again, a new recovery will start from meta 1. Since meta 2n is
2289355810d1SShaohua Li * valid now, recovery will think meta 3 is valid, which is wrong.
2290355810d1SShaohua Li * The solution is to create a new meta in meta2 with its seq == meta
22913c6edc66SSong Liu * 1's seq + 10000 and let the superblock point to meta2. The same recovery
22923c6edc66SSong Liu * will not think meta 3 is a valid meta, because its seq doesn't match.
2293355810d1SShaohua Li */
2294355810d1SShaohua Li
2295b4c625c6SSong Liu /*
2296b4c625c6SSong Liu * Before recovery, the log looks like the following
2297b4c625c6SSong Liu *
2298b4c625c6SSong Liu * ---------------------------------------------
2299b4c625c6SSong Liu * |           valid log        | invalid log  |
2300b4c625c6SSong Liu * ---------------------------------------------
2301b4c625c6SSong Liu * ^
2302b4c625c6SSong Liu * |- log->last_checkpoint
2303b4c625c6SSong Liu * |- log->last_cp_seq
2304b4c625c6SSong Liu *
2305b4c625c6SSong Liu * Now we scan through the log until we see an invalid entry
2306b4c625c6SSong Liu *
2307b4c625c6SSong Liu * ---------------------------------------------
2308b4c625c6SSong Liu * |           valid log        | invalid log  |
2309b4c625c6SSong Liu * ---------------------------------------------
2310b4c625c6SSong Liu * ^                            ^
2311b4c625c6SSong Liu * |- log->last_checkpoint      |- ctx->pos
2312b4c625c6SSong Liu * |- log->last_cp_seq          |- ctx->seq
2313b4c625c6SSong Liu *
2314b4c625c6SSong Liu * From this point, we need to increase the seq number by 10000 to avoid
2315b4c625c6SSong Liu * confusing the next recovery.
2316b4c625c6SSong Liu *
2317b4c625c6SSong Liu * ---------------------------------------------
2318b4c625c6SSong Liu * |           valid log        | invalid log  |
2319b4c625c6SSong Liu * ---------------------------------------------
2320b4c625c6SSong Liu * ^                            ^
2321b4c625c6SSong Liu * |- log->last_checkpoint      |- ctx->pos+1
23223c6edc66SSong Liu * |- log->last_cp_seq          |- ctx->seq+10001
2323b4c625c6SSong Liu *
2324b4c625c6SSong Liu * However, it is not safe to start the state machine yet, because data-only
2325b4c625c6SSong Liu * stripes are not yet secured in the RAID. To save these data-only stripes, we
2326b4c625c6SSong Liu * rewrite them starting from seq+10001.
2327b4c625c6SSong Liu *
2328b4c625c6SSong Liu * -----------------------------------------------------------------
2329b4c625c6SSong Liu * |           valid log        | data only stripes | invalid log  |
2330b4c625c6SSong Liu * -----------------------------------------------------------------
2331b4c625c6SSong Liu * ^                                                ^
2332b4c625c6SSong Liu * |- log->last_checkpoint                          |- ctx->pos+n
23333c6edc66SSong Liu * |- log->last_cp_seq                              |- ctx->seq+10000+n
2334b4c625c6SSong Liu *
2335b4c625c6SSong Liu * If failure happens again during this process, the recovery can safely start
2336b4c625c6SSong Liu * again from log->last_checkpoint.
2337b4c625c6SSong Liu *
2338b4c625c6SSong Liu * Once the data-only stripes are rewritten to the journal, we move the log tail
2339b4c625c6SSong Liu *
2340b4c625c6SSong Liu * -----------------------------------------------------------------
2341b4c625c6SSong Liu * |           old log          | data only stripes | invalid log  |
2342b4c625c6SSong Liu * -----------------------------------------------------------------
2343b4c625c6SSong Liu * ^                                                ^
2344b4c625c6SSong Liu * |- log->last_checkpoint                          |- ctx->pos+n
23453c6edc66SSong Liu * |- log->last_cp_seq                              |- ctx->seq+10000+n
2346b4c625c6SSong Liu *
2347b4c625c6SSong Liu * Then we can safely start the state machine. If failure happens from this
2348b4c625c6SSong Liu * point on, the recovery will start from the new log->last_checkpoint.
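 *
 * Illustrative numbers: if the scan ends with ctx->seq == 7, the rewritten
 * data-only stripes get seq values starting around 10007, so a stale meta
 * block left behind with seq 8 can never be mistaken for a continuation of
 * the new log.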
2349b4c625c6SSong Liu */ 2350b4c625c6SSong Liu static int 2351b4c625c6SSong Liu r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, 2352b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 2353b4c625c6SSong Liu { 2354a85dd7b8SSong Liu struct stripe_head *sh; 2355b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 2356b4c625c6SSong Liu struct page *page; 23573c66abbaSSong Liu sector_t next_checkpoint = MaxSector; 2358b4c625c6SSong Liu 2359b4c625c6SSong Liu page = alloc_page(GFP_KERNEL); 2360b4c625c6SSong Liu if (!page) { 2361b4c625c6SSong Liu pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n", 2362b4c625c6SSong Liu mdname(mddev)); 2363b4c625c6SSong Liu return -ENOMEM; 2364b4c625c6SSong Liu } 2365b4c625c6SSong Liu 23663c66abbaSSong Liu WARN_ON(list_empty(&ctx->cached_list)); 23673c66abbaSSong Liu 2368a85dd7b8SSong Liu list_for_each_entry(sh, &ctx->cached_list, lru) { 2369b4c625c6SSong Liu struct r5l_meta_block *mb; 2370b4c625c6SSong Liu int i; 2371b4c625c6SSong Liu int offset; 2372b4c625c6SSong Liu sector_t write_pos; 2373b4c625c6SSong Liu 2374b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); 2375b4c625c6SSong Liu r5l_recovery_create_empty_meta_block(log, page, 2376b4c625c6SSong Liu ctx->pos, ctx->seq); 2377b4c625c6SSong Liu mb = page_address(page); 2378b4c625c6SSong Liu offset = le32_to_cpu(mb->meta_size); 2379fc833c2aSJackieLiu write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 2380b4c625c6SSong Liu 2381b4c625c6SSong Liu for (i = sh->disks; i--; ) { 2382b4c625c6SSong Liu struct r5dev *dev = &sh->dev[i]; 2383b4c625c6SSong Liu struct r5l_payload_data_parity *payload; 2384b4c625c6SSong Liu void *addr; 2385b4c625c6SSong Liu 2386b4c625c6SSong Liu if (test_bit(R5_InJournal, &dev->flags)) { 2387b4c625c6SSong Liu payload = (void *)mb + offset; 2388b4c625c6SSong Liu payload->header.type = cpu_to_le16( 2389b4c625c6SSong Liu R5LOG_PAYLOAD_DATA); 23901ad45a9bSJason Yan payload->size = cpu_to_le32(BLOCK_SECTORS); 2391b4c625c6SSong Liu payload->location = cpu_to_le64( 2392b4c625c6SSong Liu raid5_compute_blocknr(sh, i, 0)); 2393b4c625c6SSong Liu addr = kmap_atomic(dev->page); 2394b4c625c6SSong Liu payload->checksum[0] = cpu_to_le32( 2395b4c625c6SSong Liu crc32c_le(log->uuid_checksum, addr, 2396b4c625c6SSong Liu PAGE_SIZE)); 2397b4c625c6SSong Liu kunmap_atomic(addr); 2398b4c625c6SSong Liu sync_page_io(log->rdev, write_pos, PAGE_SIZE, 2399b4c625c6SSong Liu dev->page, REQ_OP_WRITE, 0, false); 2400b4c625c6SSong Liu write_pos = r5l_ring_add(log, write_pos, 2401b4c625c6SSong Liu BLOCK_SECTORS); 2402b4c625c6SSong Liu offset += sizeof(__le32) + 2403b4c625c6SSong Liu sizeof(struct r5l_payload_data_parity); 2404b4c625c6SSong Liu 2405b4c625c6SSong Liu } 2406b4c625c6SSong Liu } 2407b4c625c6SSong Liu mb->meta_size = cpu_to_le32(offset); 24085c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 24095c88f403SSong Liu mb, PAGE_SIZE)); 2410b4c625c6SSong Liu sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 24115a8948f8SJan Kara REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); 2412b4c625c6SSong Liu sh->log_start = ctx->pos; 24133c66abbaSSong Liu list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 24143c66abbaSSong Liu atomic_inc(&log->stripe_in_journal_count); 2415b4c625c6SSong Liu ctx->pos = write_pos; 2416b4c625c6SSong Liu ctx->seq += 1; 24173c66abbaSSong Liu next_checkpoint = sh->log_start; 2418b4c625c6SSong Liu } 24193c66abbaSSong Liu log->next_checkpoint = next_checkpoint; 2420b4c625c6SSong Liu __free_page(page); 2421b4c625c6SSong Liu return 
0; 2422b4c625c6SSong Liu } 2423b4c625c6SSong Liu 2424a85dd7b8SSong Liu static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, 2425a85dd7b8SSong Liu struct r5l_recovery_ctx *ctx) 2426a85dd7b8SSong Liu { 2427a85dd7b8SSong Liu struct mddev *mddev = log->rdev->mddev; 2428a85dd7b8SSong Liu struct r5conf *conf = mddev->private; 2429a85dd7b8SSong Liu struct stripe_head *sh, *next; 2430c9020e64SSong Liu bool cleared_pending = false; 2431a85dd7b8SSong Liu 2432a85dd7b8SSong Liu if (ctx->data_only_stripes == 0) 2433a85dd7b8SSong Liu return; 2434a85dd7b8SSong Liu 2435c9020e64SSong Liu if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 2436c9020e64SSong Liu cleared_pending = true; 2437c9020e64SSong Liu clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2438c9020e64SSong Liu } 2439a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; 2440a85dd7b8SSong Liu 2441a85dd7b8SSong Liu list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { 2442a85dd7b8SSong Liu r5c_make_stripe_write_out(sh); 2443a85dd7b8SSong Liu set_bit(STRIPE_HANDLE, &sh->state); 2444a85dd7b8SSong Liu list_del_init(&sh->lru); 2445a85dd7b8SSong Liu raid5_release_stripe(sh); 2446a85dd7b8SSong Liu } 2447a85dd7b8SSong Liu 2448a85dd7b8SSong Liu /* reuse conf->wait_for_quiescent in recovery */ 2449a85dd7b8SSong Liu wait_event(conf->wait_for_quiescent, 2450a85dd7b8SSong Liu atomic_read(&conf->active_stripes) == 0); 2451a85dd7b8SSong Liu 2452a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 2453c9020e64SSong Liu if (cleared_pending) 2454c9020e64SSong Liu set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2455a85dd7b8SSong Liu } 2456a85dd7b8SSong Liu 2457f6bed0efSShaohua Li static int r5l_recovery_log(struct r5l_log *log) 2458f6bed0efSShaohua Li { 24595aabf7c4SSong Liu struct mddev *mddev = log->rdev->mddev; 2460effe6ee7SSong Liu struct r5l_recovery_ctx *ctx; 24615aabf7c4SSong Liu int ret; 246243b96748SJackieLiu sector_t pos; 2463355810d1SShaohua Li 2464effe6ee7SSong Liu ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 2465effe6ee7SSong Liu if (!ctx) 2466355810d1SShaohua Li return -ENOMEM; 2467355810d1SShaohua Li 2468effe6ee7SSong Liu ctx->pos = log->last_checkpoint; 2469effe6ee7SSong Liu ctx->seq = log->last_cp_seq; 2470effe6ee7SSong Liu INIT_LIST_HEAD(&ctx->cached_list); 2471effe6ee7SSong Liu ctx->meta_page = alloc_page(GFP_KERNEL); 2472effe6ee7SSong Liu 2473effe6ee7SSong Liu if (!ctx->meta_page) { 2474effe6ee7SSong Liu ret = -ENOMEM; 2475effe6ee7SSong Liu goto meta_page; 2476effe6ee7SSong Liu } 2477effe6ee7SSong Liu 2478effe6ee7SSong Liu if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) { 2479effe6ee7SSong Liu ret = -ENOMEM; 2480effe6ee7SSong Liu goto ra_pool; 2481effe6ee7SSong Liu } 2482effe6ee7SSong Liu 2483effe6ee7SSong Liu ret = r5c_recovery_flush_log(log, ctx); 2484355810d1SShaohua Li 2485355810d1SShaohua Li if (ret) 2486effe6ee7SSong Liu goto error; 24875aabf7c4SSong Liu 2488effe6ee7SSong Liu pos = ctx->pos; 2489effe6ee7SSong Liu ctx->seq += 10000; 249043b96748SJackieLiu 2491effe6ee7SSong Liu if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0)) 249292e6245dSSong Liu pr_info("md/raid:%s: starting from clean shutdown\n", 24935aabf7c4SSong Liu mdname(mddev)); 2494a85dd7b8SSong Liu else 249592e6245dSSong Liu pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", 2496effe6ee7SSong Liu mdname(mddev), ctx->data_only_stripes, 2497effe6ee7SSong Liu ctx->data_parity_stripes); 24985aabf7c4SSong Liu 2499effe6ee7SSong Liu if (ctx->data_only_stripes == 0) 
{ 2500effe6ee7SSong Liu log->next_checkpoint = ctx->pos; 2501effe6ee7SSong Liu r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++); 2502effe6ee7SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 2503effe6ee7SSong Liu } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) { 25045aabf7c4SSong Liu pr_err("md/raid:%s: failed to rewrite stripes to journal\n", 25055aabf7c4SSong Liu mdname(mddev)); 2506effe6ee7SSong Liu ret = -EIO; 2507effe6ee7SSong Liu goto error; 25085aabf7c4SSong Liu } 25095aabf7c4SSong Liu 2510effe6ee7SSong Liu log->log_start = ctx->pos; 2511effe6ee7SSong Liu log->seq = ctx->seq; 251243b96748SJackieLiu log->last_checkpoint = pos; 251343b96748SJackieLiu r5l_write_super(log, pos); 2514a85dd7b8SSong Liu 2515effe6ee7SSong Liu r5c_recovery_flush_data_only_stripes(log, ctx); 2516effe6ee7SSong Liu ret = 0; 2517effe6ee7SSong Liu error: 2518effe6ee7SSong Liu r5l_recovery_free_ra_pool(log, ctx); 2519effe6ee7SSong Liu ra_pool: 2520effe6ee7SSong Liu __free_page(ctx->meta_page); 2521effe6ee7SSong Liu meta_page: 2522effe6ee7SSong Liu kfree(ctx); 2523effe6ee7SSong Liu return ret; 2524f6bed0efSShaohua Li } 2525f6bed0efSShaohua Li 2526f6bed0efSShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp) 2527f6bed0efSShaohua Li { 2528f6bed0efSShaohua Li struct mddev *mddev = log->rdev->mddev; 2529f6bed0efSShaohua Li 2530f6bed0efSShaohua Li log->rdev->journal_tail = cp; 25312953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2532f6bed0efSShaohua Li } 2533f6bed0efSShaohua Li 25342c7da14bSSong Liu static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) 25352c7da14bSSong Liu { 2536a72cbf83SSong Liu struct r5conf *conf; 25372c7da14bSSong Liu int ret; 25382c7da14bSSong Liu 253901b5d32aSGuoqing Jiang spin_lock(&mddev->lock); 2540a72cbf83SSong Liu conf = mddev->private; 2541a72cbf83SSong Liu if (!conf || !conf->log) { 254201b5d32aSGuoqing Jiang spin_unlock(&mddev->lock); 25432c7da14bSSong Liu return 0; 2544a72cbf83SSong Liu } 25452c7da14bSSong Liu 25462c7da14bSSong Liu switch (conf->log->r5c_journal_mode) { 25472c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_THROUGH: 25482c7da14bSSong Liu ret = snprintf( 25492c7da14bSSong Liu page, PAGE_SIZE, "[%s] %s\n", 25502c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], 25512c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); 25522c7da14bSSong Liu break; 25532c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_BACK: 25542c7da14bSSong Liu ret = snprintf( 25552c7da14bSSong Liu page, PAGE_SIZE, "%s [%s]\n", 25562c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], 25572c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); 25582c7da14bSSong Liu break; 25592c7da14bSSong Liu default: 25602c7da14bSSong Liu ret = 0; 25612c7da14bSSong Liu } 256201b5d32aSGuoqing Jiang spin_unlock(&mddev->lock); 25632c7da14bSSong Liu return ret; 25642c7da14bSSong Liu } 25652c7da14bSSong Liu 256678e470c2SHeinz Mauelshagen /* 256778e470c2SHeinz Mauelshagen * Set journal cache mode on @mddev (external API initially needed by dm-raid). 256878e470c2SHeinz Mauelshagen * 256978e470c2SHeinz Mauelshagen * @mode as defined in 'enum r5c_journal_mode'. 
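 *
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * callers are expected to hold the mddev reconfig lock, as
 * r5c_journal_mode_store() below does via mddev_lock()):
 *
 *	err = r5c_journal_mode_set(mddev, R5C_JOURNAL_MODE_WRITE_BACK);
 *
 * This returns -EINVAL for an out-of-range mode or when switching a
 * degraded array to write-back, and -ENODEV when the array has no journal.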
257078e470c2SHeinz Mauelshagen  *
257178e470c2SHeinz Mauelshagen  */
257278e470c2SHeinz Mauelshagen int r5c_journal_mode_set(struct mddev *mddev, int mode)
25732c7da14bSSong Liu {
2574b44886c5SSong Liu 	struct r5conf *conf;
25752c7da14bSSong Liu 
257678e470c2SHeinz Mauelshagen 	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
257778e470c2SHeinz Mauelshagen 	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
25782c7da14bSSong Liu 		return -EINVAL;
25792c7da14bSSong Liu 
2580b44886c5SSong Liu 	conf = mddev->private;
2581ff35f58eSSong Liu 	if (!conf || !conf->log)
2582b44886c5SSong Liu 		return -ENODEV;
2583b44886c5SSong Liu 
25842e38a37fSSong Liu 	if (raid5_calc_degraded(conf) > 0 &&
2585ff35f58eSSong Liu 	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
25862e38a37fSSong Liu 		return -EINVAL;
25872e38a37fSSong Liu 
25882c7da14bSSong Liu 	mddev_suspend(mddev);
258978e470c2SHeinz Mauelshagen 	conf->log->r5c_journal_mode = mode;
25902c7da14bSSong Liu 	mddev_resume(mddev);
25912c7da14bSSong Liu 
25922c7da14bSSong Liu 	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
259378e470c2SHeinz Mauelshagen 		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
259478e470c2SHeinz Mauelshagen 	return 0;
259578e470c2SHeinz Mauelshagen }
259678e470c2SHeinz Mauelshagen EXPORT_SYMBOL(r5c_journal_mode_set);
259778e470c2SHeinz Mauelshagen 
259878e470c2SHeinz Mauelshagen static ssize_t r5c_journal_mode_store(struct mddev *mddev,
259978e470c2SHeinz Mauelshagen 				      const char *page, size_t length)
260078e470c2SHeinz Mauelshagen {
260178e470c2SHeinz Mauelshagen 	int mode = ARRAY_SIZE(r5c_journal_mode_str);
260278e470c2SHeinz Mauelshagen 	size_t len = length;
2603ff35f58eSSong Liu 	int ret;
260478e470c2SHeinz Mauelshagen 
260578e470c2SHeinz Mauelshagen 	if (len < 2)
260678e470c2SHeinz Mauelshagen 		return -EINVAL;
260778e470c2SHeinz Mauelshagen 
260878e470c2SHeinz Mauelshagen 	if (page[len - 1] == '\n')
260978e470c2SHeinz Mauelshagen 		len--;
261078e470c2SHeinz Mauelshagen 
261178e470c2SHeinz Mauelshagen 	while (mode--)
261278e470c2SHeinz Mauelshagen 		if (strlen(r5c_journal_mode_str[mode]) == len &&
261378e470c2SHeinz Mauelshagen 		    !strncmp(page, r5c_journal_mode_str[mode], len))
261478e470c2SHeinz Mauelshagen 			break;
2615ff35f58eSSong Liu 	ret = mddev_lock(mddev);
2616ff35f58eSSong Liu 	if (ret)
2617ff35f58eSSong Liu 		return ret;
2618ff35f58eSSong Liu 	ret = r5c_journal_mode_set(mddev, mode);
2619ff35f58eSSong Liu 	mddev_unlock(mddev);
2620ff35f58eSSong Liu 	return ret ?: length;
26212c7da14bSSong Liu }
26222c7da14bSSong Liu 
26232c7da14bSSong Liu struct md_sysfs_entry
26242c7da14bSSong Liu r5c_journal_mode = __ATTR(journal_mode, 0644,
26252c7da14bSSong Liu 			  r5c_journal_mode_show, r5c_journal_mode_store);
26262c7da14bSSong Liu 
26272ded3703SSong Liu /*
26282ded3703SSong Liu  * Try to handle a write operation in the caching phase. This function
26292ded3703SSong Liu  * should only be called in write-back mode.
26302ded3703SSong Liu  *
26312ded3703SSong Liu  * If all outstanding writes can be handled in the caching phase, this
26322ded3703SSong Liu  * function returns 0. If the writes require the write-out phase, it calls
26332ded3703SSong Liu  * r5c_make_stripe_write_out() and returns -EAGAIN.
26342ded3703SSong Liu  */
26352ded3703SSong Liu int r5c_try_caching_write(struct r5conf *conf,
26362ded3703SSong Liu 			  struct stripe_head *sh,
26372ded3703SSong Liu 			  struct stripe_head_state *s,
26382ded3703SSong Liu 			  int disks)
26392ded3703SSong Liu {
26402ded3703SSong Liu 	struct r5l_log *log = conf->log;
26411e6d690bSSong Liu 	int i;
26421e6d690bSSong Liu 	struct r5dev *dev;
26431e6d690bSSong Liu 	int to_cache = 0;
264403b047f4SSong Liu 	void **pslot;
264503b047f4SSong Liu 	sector_t tree_index;
264603b047f4SSong Liu 	int ret;
264703b047f4SSong Liu 	uintptr_t refcount;
26482ded3703SSong Liu 
26492ded3703SSong Liu 	BUG_ON(!r5c_is_writeback(log));
26502ded3703SSong Liu 
26511e6d690bSSong Liu 	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
26521e6d690bSSong Liu 		/*
26531e6d690bSSong Liu 		 * There are two different scenarios here:
26541e6d690bSSong Liu 		 *  1. The stripe has some data cached, and it is sent to
26551e6d690bSSong Liu 		 *     the write-out phase for reclaim
26561e6d690bSSong Liu 		 *  2. The stripe is clean, and this is the first write
26571e6d690bSSong Liu 		 *
26581e6d690bSSong Liu 		 * For 1, return -EAGAIN, so we continue with
26591e6d690bSSong Liu 		 * handle_stripe_dirtying().
26601e6d690bSSong Liu 		 *
26611e6d690bSSong Liu 		 * For 2, set STRIPE_R5C_CACHING and continue with the
26621e6d690bSSong Liu 		 * caching write.
26631e6d690bSSong Liu 		 */
26641e6d690bSSong Liu 
26651e6d690bSSong Liu 		/* case 1: anything in s->injournal or anything in s->written */
26661e6d690bSSong Liu 		if (s->injournal > 0 || s->written > 0)
26671e6d690bSSong Liu 			return -EAGAIN;
26681e6d690bSSong Liu 		/* case 2 */
26691e6d690bSSong Liu 		set_bit(STRIPE_R5C_CACHING, &sh->state);
26701e6d690bSSong Liu 	}
26711e6d690bSSong Liu 
26722e38a37fSSong Liu 	/*
26732e38a37fSSong Liu 	 * When run in degraded mode, the array is set to write-through mode.
26742e38a37fSSong Liu 	 * This check helps drain pending writes safely during the transition
26752e38a37fSSong Liu 	 * to write-through mode.
26765ddf0440SSong Liu 	 *
26775ddf0440SSong Liu 	 * When a stripe is syncing, the write is also handled in
26785ddf0440SSong Liu 	 * write-through mode.
26792e38a37fSSong Liu */ 26805ddf0440SSong Liu if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { 26812e38a37fSSong Liu r5c_make_stripe_write_out(sh); 26822e38a37fSSong Liu return -EAGAIN; 26832e38a37fSSong Liu } 26842e38a37fSSong Liu 26851e6d690bSSong Liu for (i = disks; i--; ) { 26861e6d690bSSong Liu dev = &sh->dev[i]; 26871e6d690bSSong Liu /* if non-overwrite, use writing-out phase */ 26881e6d690bSSong Liu if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) && 26891e6d690bSSong Liu !test_bit(R5_InJournal, &dev->flags)) { 26902ded3703SSong Liu r5c_make_stripe_write_out(sh); 26912ded3703SSong Liu return -EAGAIN; 26922ded3703SSong Liu } 26931e6d690bSSong Liu } 26941e6d690bSSong Liu 269503b047f4SSong Liu /* if the stripe is not counted in big_stripe_tree, add it now */ 269603b047f4SSong Liu if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 269703b047f4SSong Liu !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { 269803b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector); 269903b047f4SSong Liu spin_lock(&log->tree_lock); 270003b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree, 270103b047f4SSong Liu tree_index); 270203b047f4SSong Liu if (pslot) { 270303b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected( 270403b047f4SSong Liu pslot, &log->tree_lock) >> 270503b047f4SSong Liu R5C_RADIX_COUNT_SHIFT; 270603b047f4SSong Liu radix_tree_replace_slot( 270703b047f4SSong Liu &log->big_stripe_tree, pslot, 270803b047f4SSong Liu (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT)); 270903b047f4SSong Liu } else { 271003b047f4SSong Liu /* 271103b047f4SSong Liu * this radix_tree_insert can fail safely, so no 271203b047f4SSong Liu * need to call radix_tree_preload() 271303b047f4SSong Liu */ 271403b047f4SSong Liu ret = radix_tree_insert( 271503b047f4SSong Liu &log->big_stripe_tree, tree_index, 271603b047f4SSong Liu (void *)(1 << R5C_RADIX_COUNT_SHIFT)); 271703b047f4SSong Liu if (ret) { 271803b047f4SSong Liu spin_unlock(&log->tree_lock); 271903b047f4SSong Liu r5c_make_stripe_write_out(sh); 272003b047f4SSong Liu return -EAGAIN; 272103b047f4SSong Liu } 272203b047f4SSong Liu } 272303b047f4SSong Liu spin_unlock(&log->tree_lock); 272403b047f4SSong Liu 272503b047f4SSong Liu /* 272603b047f4SSong Liu * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is 272703b047f4SSong Liu * counted in the radix tree 272803b047f4SSong Liu */ 272903b047f4SSong Liu set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); 273003b047f4SSong Liu atomic_inc(&conf->r5c_cached_partial_stripes); 273103b047f4SSong Liu } 273203b047f4SSong Liu 27331e6d690bSSong Liu for (i = disks; i--; ) { 27341e6d690bSSong Liu dev = &sh->dev[i]; 27351e6d690bSSong Liu if (dev->towrite) { 27361e6d690bSSong Liu set_bit(R5_Wantwrite, &dev->flags); 27371e6d690bSSong Liu set_bit(R5_Wantdrain, &dev->flags); 27381e6d690bSSong Liu set_bit(R5_LOCKED, &dev->flags); 27391e6d690bSSong Liu to_cache++; 27401e6d690bSSong Liu } 27411e6d690bSSong Liu } 27421e6d690bSSong Liu 27431e6d690bSSong Liu if (to_cache) { 27441e6d690bSSong Liu set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 27451e6d690bSSong Liu /* 27461e6d690bSSong Liu * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data() 27471e6d690bSSong Liu * in ops_run_io(). 
STRIPE_LOG_TRAPPED will be cleared in 27481e6d690bSSong Liu * r5c_handle_data_cached() 27491e6d690bSSong Liu */ 27501e6d690bSSong Liu set_bit(STRIPE_LOG_TRAPPED, &sh->state); 27511e6d690bSSong Liu } 27521e6d690bSSong Liu 27531e6d690bSSong Liu return 0; 27541e6d690bSSong Liu } 27551e6d690bSSong Liu 27561e6d690bSSong Liu /* 27571e6d690bSSong Liu * free extra pages (orig_page) we allocated for prexor 27581e6d690bSSong Liu */ 27591e6d690bSSong Liu void r5c_release_extra_page(struct stripe_head *sh) 27601e6d690bSSong Liu { 2761d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf; 27621e6d690bSSong Liu int i; 2763d7bd398eSSong Liu bool using_disk_info_extra_page; 2764d7bd398eSSong Liu 2765d7bd398eSSong Liu using_disk_info_extra_page = 2766d7bd398eSSong Liu sh->dev[0].orig_page == conf->disks[0].extra_page; 27671e6d690bSSong Liu 27681e6d690bSSong Liu for (i = sh->disks; i--; ) 27691e6d690bSSong Liu if (sh->dev[i].page != sh->dev[i].orig_page) { 27701e6d690bSSong Liu struct page *p = sh->dev[i].orig_page; 27711e6d690bSSong Liu 27721e6d690bSSong Liu sh->dev[i].orig_page = sh->dev[i].page; 277386aa1397SSong Liu clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); 277486aa1397SSong Liu 2775d7bd398eSSong Liu if (!using_disk_info_extra_page) 27761e6d690bSSong Liu put_page(p); 27771e6d690bSSong Liu } 2778d7bd398eSSong Liu 2779d7bd398eSSong Liu if (using_disk_info_extra_page) { 2780d7bd398eSSong Liu clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state); 2781d7bd398eSSong Liu md_wakeup_thread(conf->mddev->thread); 2782d7bd398eSSong Liu } 2783d7bd398eSSong Liu } 2784d7bd398eSSong Liu 2785d7bd398eSSong Liu void r5c_use_extra_page(struct stripe_head *sh) 2786d7bd398eSSong Liu { 2787d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf; 2788d7bd398eSSong Liu int i; 2789d7bd398eSSong Liu struct r5dev *dev; 2790d7bd398eSSong Liu 2791d7bd398eSSong Liu for (i = sh->disks; i--; ) { 2792d7bd398eSSong Liu dev = &sh->dev[i]; 2793d7bd398eSSong Liu if (dev->orig_page != dev->page) 2794d7bd398eSSong Liu put_page(dev->orig_page); 2795d7bd398eSSong Liu dev->orig_page = conf->disks[i].extra_page; 2796d7bd398eSSong Liu } 27971e6d690bSSong Liu } 27982ded3703SSong Liu 27992ded3703SSong Liu /* 28002ded3703SSong Liu * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the 28012ded3703SSong Liu * stripe is committed to RAID disks. 
28022ded3703SSong Liu  */
28032ded3703SSong Liu void r5c_finish_stripe_write_out(struct r5conf *conf,
28042ded3703SSong Liu 				 struct stripe_head *sh,
28052ded3703SSong Liu 				 struct stripe_head_state *s)
28062ded3703SSong Liu {
280703b047f4SSong Liu 	struct r5l_log *log = conf->log;
28081e6d690bSSong Liu 	int i;
28091e6d690bSSong Liu 	int do_wakeup = 0;
281003b047f4SSong Liu 	sector_t tree_index;
281103b047f4SSong Liu 	void **pslot;
281203b047f4SSong Liu 	uintptr_t refcount;
28131e6d690bSSong Liu 
281403b047f4SSong Liu 	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
28152ded3703SSong Liu 		return;
28162ded3703SSong Liu 
28172ded3703SSong Liu 	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
28182ded3703SSong Liu 	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
28192ded3703SSong Liu 
282003b047f4SSong Liu 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
28212ded3703SSong Liu 		return;
28221e6d690bSSong Liu 
28231e6d690bSSong Liu 	for (i = sh->disks; i--; ) {
28241e6d690bSSong Liu 		clear_bit(R5_InJournal, &sh->dev[i].flags);
28251e6d690bSSong Liu 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
28261e6d690bSSong Liu 			do_wakeup = 1;
28271e6d690bSSong Liu 	}
28281e6d690bSSong Liu 
28291e6d690bSSong Liu 	/*
28301e6d690bSSong Liu 	 * analyse_stripe() runs before r5c_finish_stripe_write_out(), and we
28311e6d690bSSong Liu 	 * just updated R5_InJournal above, so we also update s->injournal here.
28321e6d690bSSong Liu 	 */
28331e6d690bSSong Liu 	s->injournal = 0;
28341e6d690bSSong Liu 
28351e6d690bSSong Liu 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
28361e6d690bSSong Liu 		if (atomic_dec_and_test(&conf->pending_full_writes))
28371e6d690bSSong Liu 			md_wakeup_thread(conf->mddev->thread);
28381e6d690bSSong Liu 
28391e6d690bSSong Liu 	if (do_wakeup)
28401e6d690bSSong Liu 		wake_up(&conf->wait_for_overlap);
2841a39f7afdSSong Liu 
284203b047f4SSong Liu 	spin_lock_irq(&log->stripe_in_journal_lock);
2843a39f7afdSSong Liu 	list_del_init(&sh->r5c);
284403b047f4SSong Liu 	spin_unlock_irq(&log->stripe_in_journal_lock);
2845a39f7afdSSong Liu 	sh->log_start = MaxSector;
284603b047f4SSong Liu 
284703b047f4SSong Liu 	atomic_dec(&log->stripe_in_journal_count);
284803b047f4SSong Liu 	r5c_update_log_state(log);
284903b047f4SSong Liu 
285003b047f4SSong Liu 	/* stop counting this stripe in big_stripe_tree */
285103b047f4SSong Liu 	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
285203b047f4SSong Liu 	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
285303b047f4SSong Liu 		tree_index = r5c_tree_index(conf, sh->sector);
285403b047f4SSong Liu 		spin_lock(&log->tree_lock);
285503b047f4SSong Liu 		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
285603b047f4SSong Liu 					       tree_index);
285703b047f4SSong Liu 		BUG_ON(pslot == NULL);
285803b047f4SSong Liu 		refcount = (uintptr_t)radix_tree_deref_slot_protected(
285903b047f4SSong Liu 			pslot, &log->tree_lock) >>
286003b047f4SSong Liu 			R5C_RADIX_COUNT_SHIFT;
286103b047f4SSong Liu 		if (refcount == 1)
286203b047f4SSong Liu 			radix_tree_delete(&log->big_stripe_tree, tree_index);
286303b047f4SSong Liu 		else
286403b047f4SSong Liu 			radix_tree_replace_slot(
286503b047f4SSong Liu 				&log->big_stripe_tree, pslot,
286603b047f4SSong Liu 				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
286703b047f4SSong Liu 		spin_unlock(&log->tree_lock);
286803b047f4SSong Liu 	}
286903b047f4SSong Liu 
287003b047f4SSong Liu 	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
287103b047f4SSong Liu 		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2872e33fbb9cSShaohua Li 		atomic_dec(&conf->r5c_flushing_partial_stripes);
287303b047f4SSong Liu 		atomic_dec(&conf->r5c_cached_partial_stripes);
287403b047f4SSong Liu 	}
287503b047f4SSong Liu 
287603b047f4SSong Liu 	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
287703b047f4SSong Liu 		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2878e33fbb9cSShaohua Li 		atomic_dec(&conf->r5c_flushing_full_stripes);
287903b047f4SSong Liu 		atomic_dec(&conf->r5c_cached_full_stripes);
288003b047f4SSong Liu 	}
2881ea17481fSSong Liu 
2882ea17481fSSong Liu 	r5l_append_flush_payload(log, sh->sector);
28835ddf0440SSong Liu 	/* the stripe is flushed to the raid disks, so we can do resync now */
28845ddf0440SSong Liu 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
28855ddf0440SSong Liu 		set_bit(STRIPE_HANDLE, &sh->state);
28861e6d690bSSong Liu }
28871e6d690bSSong Liu 
2888ff875738SArtur Paszkiewicz int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
28891e6d690bSSong Liu {
2890a39f7afdSSong Liu 	struct r5conf *conf = sh->raid_conf;
28911e6d690bSSong Liu 	int pages = 0;
28921e6d690bSSong Liu 	int reserve;
28931e6d690bSSong Liu 	int i;
28941e6d690bSSong Liu 	int ret = 0;
28951e6d690bSSong Liu 
28961e6d690bSSong Liu 	BUG_ON(!log);
28971e6d690bSSong Liu 
28981e6d690bSSong Liu 	for (i = 0; i < sh->disks; i++) {
28991e6d690bSSong Liu 		void *addr;
29001e6d690bSSong Liu 
29011e6d690bSSong Liu 		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
29021e6d690bSSong Liu 			continue;
29031e6d690bSSong Liu 		addr = kmap_atomic(sh->dev[i].page);
29041e6d690bSSong Liu 		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
29051e6d690bSSong Liu 						    addr, PAGE_SIZE);
29061e6d690bSSong Liu 		kunmap_atomic(addr);
29071e6d690bSSong Liu 		pages++;
29081e6d690bSSong Liu 	}
29091e6d690bSSong Liu 	WARN_ON(pages == 0);
29101e6d690bSSong Liu 
29111e6d690bSSong Liu 	/*
29121e6d690bSSong Liu 	 * The stripe must enter the state machine again to call endio, so
29131e6d690bSSong Liu 	 * don't delay.
29141e6d690bSSong Liu 	 */
29151e6d690bSSong Liu 	clear_bit(STRIPE_DELAYED, &sh->state);
29161e6d690bSSong Liu 	atomic_inc(&sh->count);
29171e6d690bSSong Liu 
29181e6d690bSSong Liu 	mutex_lock(&log->io_mutex);
29191e6d690bSSong Liu 	/* meta + data */
29201e6d690bSSong Liu 	reserve = (1 + pages) << (PAGE_SHIFT - 9);
29211e6d690bSSong Liu 
2922a39f7afdSSong Liu 	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2923a39f7afdSSong Liu 	    sh->log_start == MaxSector)
2924a39f7afdSSong Liu 		r5l_add_no_space_stripe(log, sh);
2925a39f7afdSSong Liu 	else if (!r5l_has_free_space(log, reserve)) {
2926a39f7afdSSong Liu 		if (sh->log_start == log->last_checkpoint)
2927a39f7afdSSong Liu 			BUG();
2928a39f7afdSSong Liu 		else
2929a39f7afdSSong Liu 			r5l_add_no_space_stripe(log, sh);
29301e6d690bSSong Liu 	} else {
29311e6d690bSSong Liu 		ret = r5l_log_stripe(log, sh, pages, 0);
29321e6d690bSSong Liu 		if (ret) {
29331e6d690bSSong Liu 			spin_lock_irq(&log->io_list_lock);
29341e6d690bSSong Liu 			list_add_tail(&sh->log_list, &log->no_mem_stripes);
29351e6d690bSSong Liu 			spin_unlock_irq(&log->io_list_lock);
29361e6d690bSSong Liu 		}
29371e6d690bSSong Liu 	}
29381e6d690bSSong Liu 
29391e6d690bSSong Liu 	mutex_unlock(&log->io_mutex);
29401e6d690bSSong Liu 	return 0;
2941f6bed0efSShaohua Li }
2942f6bed0efSShaohua Li 
294303b047f4SSong Liu /* check whether this big stripe is in the write-back cache.
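 * Must be called with rcu_read_lock() held (the WARN_ON_ONCE below checks
 * this). A hypothetical caller sketch, for illustration only:
 *
 *	rcu_read_lock();
 *	cached = r5c_big_stripe_cached(conf, sect);
 *	rcu_read_unlock();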
 */
294403b047f4SSong Liu bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
294503b047f4SSong Liu {
294603b047f4SSong Liu 	struct r5l_log *log = conf->log;
294703b047f4SSong Liu 	sector_t tree_index;
294803b047f4SSong Liu 	void *slot;
294903b047f4SSong Liu 
295003b047f4SSong Liu 	if (!log)
295103b047f4SSong Liu 		return false;
295203b047f4SSong Liu 
295303b047f4SSong Liu 	WARN_ON_ONCE(!rcu_read_lock_held());
295403b047f4SSong Liu 	tree_index = r5c_tree_index(conf, sect);
295503b047f4SSong Liu 	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
295603b047f4SSong Liu 	return slot != NULL;
295703b047f4SSong Liu }
295803b047f4SSong Liu 
2959f6bed0efSShaohua Li static int r5l_load_log(struct r5l_log *log)
2960f6bed0efSShaohua Li {
2961f6bed0efSShaohua Li 	struct md_rdev *rdev = log->rdev;
2962f6bed0efSShaohua Li 	struct page *page;
2963f6bed0efSShaohua Li 	struct r5l_meta_block *mb;
2964f6bed0efSShaohua Li 	sector_t cp = log->rdev->journal_tail;
2965f6bed0efSShaohua Li 	u32 stored_crc, expected_crc;
2966f6bed0efSShaohua Li 	bool create_super = false;
2967d30dfeb9SJackieLiu 	int ret = 0;
2968f6bed0efSShaohua Li 
2969f6bed0efSShaohua Li 	/* Make sure it's valid */
2970f6bed0efSShaohua Li 	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2971f6bed0efSShaohua Li 		cp = 0;
2972f6bed0efSShaohua Li 	page = alloc_page(GFP_KERNEL);
2973f6bed0efSShaohua Li 	if (!page)
2974f6bed0efSShaohua Li 		return -ENOMEM;
2975f6bed0efSShaohua Li 
2976796a5cf0SMike Christie 	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
2977f6bed0efSShaohua Li 		ret = -EIO;
2978f6bed0efSShaohua Li 		goto ioerr;
2979f6bed0efSShaohua Li 	}
2980f6bed0efSShaohua Li 	mb = page_address(page);
2981f6bed0efSShaohua Li 
2982f6bed0efSShaohua Li 	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2983f6bed0efSShaohua Li 	    mb->version != R5LOG_VERSION) {
2984f6bed0efSShaohua Li 		create_super = true;
2985f6bed0efSShaohua Li 		goto create;
2986f6bed0efSShaohua Li 	}
2987f6bed0efSShaohua Li 	stored_crc = le32_to_cpu(mb->checksum);
2988f6bed0efSShaohua Li 	mb->checksum = 0;
29895cb2fbd6SShaohua Li 	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2990f6bed0efSShaohua Li 	if (stored_crc != expected_crc) {
2991f6bed0efSShaohua Li 		create_super = true;
2992f6bed0efSShaohua Li 		goto create;
2993f6bed0efSShaohua Li 	}
2994f6bed0efSShaohua Li 	if (le64_to_cpu(mb->position) != cp) {
2995f6bed0efSShaohua Li 		create_super = true;
2996f6bed0efSShaohua Li 		goto create;
2997f6bed0efSShaohua Li 	}
2998f6bed0efSShaohua Li create:
2999f6bed0efSShaohua Li 	if (create_super) {
3000f6bed0efSShaohua Li 		log->last_cp_seq = prandom_u32();
3001f6bed0efSShaohua Li 		cp = 0;
300256056c2eSZhengyuan Liu 		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3003f6bed0efSShaohua Li 		/*
3004f6bed0efSShaohua Li 		 * Make sure the superblock points to the correct address. The
3005f6bed0efSShaohua Li 		 * log might receive data very soon. If the superblock doesn't
3006f6bed0efSShaohua Li 		 * have the correct log tail address, recovery can't find the log.
3007f6bed0efSShaohua Li 		 */
3008f6bed0efSShaohua Li 		r5l_write_super(log, cp);
3009f6bed0efSShaohua Li 	} else
3010f6bed0efSShaohua Li 		log->last_cp_seq = le64_to_cpu(mb->seq);
3011f6bed0efSShaohua Li 
3012f6bed0efSShaohua Li 	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
30130576b1c6SShaohua Li 	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
30140576b1c6SShaohua Li 	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
30150576b1c6SShaohua Li 		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3016f6bed0efSShaohua Li 	log->last_checkpoint = cp;
3017f6bed0efSShaohua Li 
3018f6bed0efSShaohua Li 	__free_page(page);
3019f6bed0efSShaohua Li 
3020d30dfeb9SJackieLiu 	if (create_super) {
3021d30dfeb9SJackieLiu 		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3022d30dfeb9SJackieLiu 		log->seq = log->last_cp_seq + 1;
3023d30dfeb9SJackieLiu 		log->next_checkpoint = cp;
3024d30dfeb9SJackieLiu 	} else
30253d7e7e1dSZhengyuan Liu 		ret = r5l_recovery_log(log);
3026d30dfeb9SJackieLiu 
30273d7e7e1dSZhengyuan Liu 	r5c_update_log_state(log);
30283d7e7e1dSZhengyuan Liu 	return ret;
3029f6bed0efSShaohua Li ioerr:
3030f6bed0efSShaohua Li 	__free_page(page);
3031f6bed0efSShaohua Li 	return ret;
3032f6bed0efSShaohua Li }
3033f6bed0efSShaohua Li 
3034d5d885fdSSong Liu int r5l_start(struct r5l_log *log)
3035d5d885fdSSong Liu {
3036d5d885fdSSong Liu 	int ret;
3037d5d885fdSSong Liu 
3038d5d885fdSSong Liu 	if (!log)
3039d5d885fdSSong Liu 		return 0;
3040d5d885fdSSong Liu 
3041d5d885fdSSong Liu 	ret = r5l_load_log(log);
3042d5d885fdSSong Liu 	if (ret) {
3043d5d885fdSSong Liu 		struct mddev *mddev = log->rdev->mddev;
3044d5d885fdSSong Liu 		struct r5conf *conf = mddev->private;
3045d5d885fdSSong Liu 
3046d5d885fdSSong Liu 		r5l_exit_log(conf);
3047d5d885fdSSong Liu 	}
3048d5d885fdSSong Liu 	return ret;
3049d5d885fdSSong Liu }
3050d5d885fdSSong Liu 
305170d466f7SSong Liu void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
30522e38a37fSSong Liu {
30532e38a37fSSong Liu 	struct r5conf *conf = mddev->private;
30542e38a37fSSong Liu 	struct r5l_log *log = conf->log;
30552e38a37fSSong Liu 
30562e38a37fSSong Liu 	if (!log)
30572e38a37fSSong Liu 		return;
30582e38a37fSSong Liu 
305970d466f7SSong Liu 	if ((raid5_calc_degraded(conf) > 0 ||
306070d466f7SSong Liu 	     test_bit(Journal, &rdev->flags)) &&
30612e38a37fSSong Liu 	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
30622e38a37fSSong Liu 		schedule_work(&log->disable_writeback_work);
30632e38a37fSSong Liu }
30642e38a37fSSong Liu 
3065f6bed0efSShaohua Li int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3066f6bed0efSShaohua Li {
3067c888a8f9SJens Axboe 	struct request_queue *q = bdev_get_queue(rdev->bdev);
3068f6bed0efSShaohua Li 	struct r5l_log *log;
3069ff875738SArtur Paszkiewicz 	char b[BDEVNAME_SIZE];
3070afeee514SKent Overstreet 	int ret;
3071ff875738SArtur Paszkiewicz 
3072ff875738SArtur Paszkiewicz 	pr_debug("md/raid:%s: using device %s as journal\n",
3073ff875738SArtur Paszkiewicz 		 mdname(conf->mddev), bdevname(rdev->bdev, b));
3074f6bed0efSShaohua Li 
3075f6bed0efSShaohua Li 	if (PAGE_SIZE != 4096)
3076f6bed0efSShaohua Li 		return -EINVAL;
3077c757ec95SSong Liu 
3078c757ec95SSong Liu 	/*
3079c757ec95SSong Liu 	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3080c757ec95SSong Liu 	 * raid_disks r5l_payload_data_parity.
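	 *
	 * As a rough worked bound (the struct sizes are approximations of
	 * the on-disk format, not taken from this file): with a meta block
	 * header of about 32 bytes and about 20 bytes per payload (a
	 * 16-byte r5l_payload_data_parity plus one __le32 checksum),
	 * (4096 - 32) / 20 allows roughly 203 payloads per 4K meta block,
	 * which is where the limit below comes from.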
3081c757ec95SSong Liu 	 *
3082c757ec95SSong Liu 	 * The write journal and cache do not work for very big arrays
3083c757ec95SSong Liu 	 * (raid_disks > 203).
3084c757ec95SSong Liu 	 */
3085c757ec95SSong Liu 	if (sizeof(struct r5l_meta_block) +
3086c757ec95SSong Liu 	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3087c757ec95SSong Liu 	     conf->raid_disks) > PAGE_SIZE) {
3088c757ec95SSong Liu 		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3089c757ec95SSong Liu 		       mdname(conf->mddev), conf->raid_disks);
3090c757ec95SSong Liu 		return -EINVAL;
3091c757ec95SSong Liu 	}
3092c757ec95SSong Liu 
3093f6bed0efSShaohua Li 	log = kzalloc(sizeof(*log), GFP_KERNEL);
3094f6bed0efSShaohua Li 	if (!log)
3095f6bed0efSShaohua Li 		return -ENOMEM;
3096f6bed0efSShaohua Li 	log->rdev = rdev;
3097f6bed0efSShaohua Li 
3098c888a8f9SJens Axboe 	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
309956fef7c6SChristoph Hellwig 
31005cb2fbd6SShaohua Li 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3101f6bed0efSShaohua Li 				       sizeof(rdev->mddev->uuid));
3102f6bed0efSShaohua Li 
3103f6bed0efSShaohua Li 	mutex_init(&log->io_mutex);
3104f6bed0efSShaohua Li 
3105f6bed0efSShaohua Li 	spin_lock_init(&log->io_list_lock);
3106f6bed0efSShaohua Li 	INIT_LIST_HEAD(&log->running_ios);
31070576b1c6SShaohua Li 	INIT_LIST_HEAD(&log->io_end_ios);
3108a8c34f91SShaohua Li 	INIT_LIST_HEAD(&log->flushing_ios);
310904732f74SChristoph Hellwig 	INIT_LIST_HEAD(&log->finished_ios);
3110f6bed0efSShaohua Li 
3111f6bed0efSShaohua Li 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3112f6bed0efSShaohua Li 	if (!log->io_kc)
3113f6bed0efSShaohua Li 		goto io_kc;
3114f6bed0efSShaohua Li 
3115afeee514SKent Overstreet 	ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3116afeee514SKent Overstreet 	if (ret)
31175036c390SChristoph Hellwig 		goto io_pool;
31185036c390SChristoph Hellwig 
3119afeee514SKent Overstreet 	ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3120afeee514SKent Overstreet 	if (ret)
3121c38d29b3SChristoph Hellwig 		goto io_bs;
3122c38d29b3SChristoph Hellwig 
3123afeee514SKent Overstreet 	ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3124afeee514SKent Overstreet 	if (ret)
3125e8deb638SChristoph Hellwig 		goto out_mempool;
3126e8deb638SChristoph Hellwig 
312703b047f4SSong Liu 	spin_lock_init(&log->tree_lock);
312803b047f4SSong Liu 	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
312903b047f4SSong Liu 
31300576b1c6SShaohua Li 	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
31310576b1c6SShaohua Li 						 log->rdev->mddev, "reclaim");
31320576b1c6SShaohua Li 	if (!log->reclaim_thread)
31330576b1c6SShaohua Li 		goto reclaim_thread;
3134a39f7afdSSong Liu 	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3135a39f7afdSSong Liu 
31360fd22b45SShaohua Li 	init_waitqueue_head(&log->iounit_wait);
31370576b1c6SShaohua Li 
31385036c390SChristoph Hellwig 	INIT_LIST_HEAD(&log->no_mem_stripes);
31395036c390SChristoph Hellwig 
3140f6bed0efSShaohua Li 	INIT_LIST_HEAD(&log->no_space_stripes);
3141f6bed0efSShaohua Li 	spin_lock_init(&log->no_space_stripes_lock);
3142f6bed0efSShaohua Li 
31433bddb7f8SSong Liu 	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
31442e38a37fSSong Liu 	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
31453bddb7f8SSong Liu 
31462ded3703SSong Liu 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3147a39f7afdSSong Liu 	INIT_LIST_HEAD(&log->stripe_in_journal_list);
3148a39f7afdSSong Liu 
spin_lock_init(&log->stripe_in_journal_lock); 3149a39f7afdSSong Liu atomic_set(&log->stripe_in_journal_count, 0); 31502ded3703SSong Liu 3151d2250f10SSong Liu rcu_assign_pointer(conf->log, log); 3152d2250f10SSong Liu 3153a62ab49eSShaohua Li set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 3154f6bed0efSShaohua Li return 0; 3155e8deb638SChristoph Hellwig 31560576b1c6SShaohua Li reclaim_thread: 3157afeee514SKent Overstreet mempool_exit(&log->meta_pool); 3158e8deb638SChristoph Hellwig out_mempool: 3159afeee514SKent Overstreet bioset_exit(&log->bs); 3160c38d29b3SChristoph Hellwig io_bs: 3161afeee514SKent Overstreet mempool_exit(&log->io_pool); 31625036c390SChristoph Hellwig io_pool: 3163f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc); 3164f6bed0efSShaohua Li io_kc: 3165f6bed0efSShaohua Li kfree(log); 3166f6bed0efSShaohua Li return -EINVAL; 3167f6bed0efSShaohua Li } 3168f6bed0efSShaohua Li 3169ff875738SArtur Paszkiewicz void r5l_exit_log(struct r5conf *conf) 3170f6bed0efSShaohua Li { 3171ff875738SArtur Paszkiewicz struct r5l_log *log = conf->log; 3172ff875738SArtur Paszkiewicz 3173ff875738SArtur Paszkiewicz conf->log = NULL; 3174ff875738SArtur Paszkiewicz synchronize_rcu(); 3175ff875738SArtur Paszkiewicz 31764d5324f7SNeilBrown /* Ensure disable_writeback_work wakes up and exits */ 31774d5324f7SNeilBrown wake_up(&conf->mddev->sb_wait); 31782e38a37fSSong Liu flush_work(&log->disable_writeback_work); 31790576b1c6SShaohua Li md_unregister_thread(&log->reclaim_thread); 3180afeee514SKent Overstreet mempool_exit(&log->meta_pool); 3181afeee514SKent Overstreet bioset_exit(&log->bs); 3182afeee514SKent Overstreet mempool_exit(&log->io_pool); 3183f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc); 3184f6bed0efSShaohua Li kfree(log); 3185f6bed0efSShaohua Li } 3186