/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data stored on disk in 4k-sized units (blocks), regardless of the
 * underlying hardware sector size. Only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, the reclaim runs every log->max_free_space.
 * This prevents recovery scans from taking too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available so that we do not get too tight.
 */
#define R5L_POOL_SIZE 4
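
/*
 * For reference, the numbers follow directly from the definitions above:
 * a block is one 4kB page, i.e. 1 << (PAGE_SHIFT - 9) == 8 512-byte
 * sectors, hence BLOCK_SECTORS == 8 and BLOCK_SECTOR_SHIFT == 3.
 * RECLAIM_MAX_FREE_SPACE is 10 * 1024 * 1024 * 2 sectors == 10GiB of
 * log space.
 */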

static char *r5c_journal_mode_str[] = {"write-through",
                                       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded to
                                         * BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs if free space drops
                                         * to this size */

        sector_t last_checkpoint;       /* log tail. where recovery scan
                                         * starts from */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head. where new data appends */
        u64 seq;                        /* log head sequence */

        sector_t next_checkpoint;

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet written
                                         * to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for log
                                         * cache flush */
        struct list_head finished_ios;  /* io_units which settle down in log disk */
        struct bio flush_bio;

        struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

        struct kmem_cache *io_kc;
        mempool_t *io_pool;
        struct bio_set *bs;
        mempool_t *meta_pool;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space that needs to be
                                         * reclaimed.  if it's 0, reclaim spaces
                                         * used by io_units which are in
                                         * IO_UNIT_STRIPE_END state (eg, reclaim
                                         * doesn't wait for a specific io_unit
                                         * switching to IO_UNIT_STRIPE_END
                                         * state) */
        wait_queue_head_t iounit_wait;

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;

        bool need_cache_flush;

        /* for r5c_cache */
        enum r5c_journal_mode r5c_journal_mode;

        /* all stripes in r5cache, in the order of seq at sh->log_start */
        struct list_head stripe_in_journal_list;

        spinlock_t stripe_in_journal_lock;
        atomic_t stripe_in_journal_count;

        /* to submit async io_units, to fulfill ordering of flush */
        struct work_struct deferred_io_work;
        /* to disable writeback during degraded mode */
        struct work_struct disable_writeback_work;

        /* for chunk_aligned_read in writeback mode, details below */
        spinlock_t tree_lock;
        struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * the radix tree requires the lowest 2 bits of the data pointer to be 2b'00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
                                      sector_t sect)
{
        sector_t offset;

        offset = sector_div(sect, conf->chunk_sectors);
        return sect;
}
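
/*
 * Worked example (illustrative numbers, not taken from the code): with
 * conf->chunk_sectors == 512 (256kB chunks), a request at sector 1234
 * lies in chunk 2 (1234 / 512 == 2, remainder 210), so r5c_tree_index()
 * returns 2 and every stripe of that chunk shares radix tree key 2.
 * Per R5C_RADIX_COUNT_SHIFT above, a count of n cached stripes is then
 * stored in the tree as the pointer value (n << 2).
 */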

/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with normal writes; as we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page; /* store meta block */
        int meta_offset;        /* current offset in meta_page */

        struct bio *current_bio;/* current_bio accepting new data */

        atomic_t pending_stripe;/* how many stripes not flushed to raid */
        u64 seq;                /* seq number of the metablock */
        sector_t log_start;     /* where the io_unit starts */
        sector_t log_end;       /* where the io_unit ends */
        struct list_head log_sibling; /* log->running_ios */
        struct list_head stripe_list; /* stripes added to the io_unit */

        int state;
        bool need_split_bio;
        struct bio *split_bio;

        unsigned int has_flush:1;               /* include flush request */
        unsigned int has_fua:1;                 /* include fua request */
        unsigned int has_null_flush:1;          /* include null flush request */
        unsigned int has_flush_payload:1;       /* include flush payload */
        /*
         * io isn't sent yet; a flush/fua request can only be submitted
         * once it is the first IO in the running_ios list
         */
        unsigned int io_deferred:1;

        struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bio start writing to log,
                                 * not accepting new bio */
        IO_UNIT_IO_END = 2,     /* io_unit bio finish writing to log */
        IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
        return (log != NULL &&
                log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}
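
/*
 * Worked example of the ring arithmetic above (illustrative numbers):
 * with log->device_size == 1000 sectors, r5l_ring_add(log, 990, 20)
 * wraps around to sector 10, and r5l_ring_distance(log, 990, 10) is
 * 10 + 1000 - 990 == 20 sectors.
 */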

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
        sector_t used_size;

        used_size = r5l_ring_distance(log, log->last_checkpoint,
                                      log->log_start);

        return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                    enum r5l_io_unit_state state)
{
        if (WARN_ON(io->state >= state))
                return;
        io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
        struct bio *wbi, *wbi2;

        wbi = dev->written;
        dev->written = NULL;
        while (wbi && wbi->bi_iter.bi_sector <
               dev->sector + STRIPE_SECTORS) {
                wbi2 = r5_next_bio(wbi, dev->sector);
                md_write_end(conf->mddev);
                bio_endio(wbi);
                wbi = wbi2;
        }
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
                                  struct stripe_head *sh, int disks)
{
        int i;

        for (i = sh->disks; i--; ) {
                if (sh->dev[i].written) {
                        set_bit(R5_UPTODATE, &sh->dev[i].flags);
                        r5c_return_dev_pending_writes(conf, &sh->dev[i]);
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                        STRIPE_SECTORS,
                                        !test_bit(STRIPE_DEGRADED, &sh->state),
                                        0);
                }
        }
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
        int total_cached;

        if (!r5c_is_writeback(conf->log))
                return;

        total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
                atomic_read(&conf->r5c_cached_full_stripes);

        /*
         * The following condition is true for either of the following:
         *   - stripe cache pressure high:
         *          total_cached > 3/4 min_nr_stripes ||
         *          empty_inactive_list_nr > 0
         *   - stripe cache pressure moderate:
         *          total_cached > 1/2 min_nr_stripes
         */
        if (total_cached > conf->min_nr_stripes * 1 / 2 ||
            atomic_read(&conf->empty_inactive_list_nr) > 0)
                r5l_wake_reclaim(conf->log, 0);
}
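
/*
 * For illustration (hypothetical numbers): with
 * conf->min_nr_stripes == 256, the check above wakes reclaim as soon as
 * more than 128 stripes are cached, or earlier if any inactive list has
 * run empty; reclaim itself then decides how much to flush.
 */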

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
        if (!r5c_is_writeback(conf->log))
                return;

        /*
         * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
         * or a full stripe (chunk size / 4k stripes).
         */
        if (atomic_read(&conf->r5c_cached_full_stripes) >=
            min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
                conf->chunk_sectors >> STRIPE_SHIFT))
                r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that occupy log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priority:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * them requires (conf->raid_disks + 1) pages of journal space. There are
 * at most (conf->group_cnt + 1) stripes that passed 1. So the total journal
 * space required to flush all cached stripes (in pages) is:
 *
 *   (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks + 1)
 * or
 *   (stripe_in_journal_count) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
        struct r5l_log *log = conf->log;

        if (!r5c_is_writeback(log))
                return 0;

        return BLOCK_SECTORS *
                ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
                 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
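
/*
 * Worked example of the bound above (illustrative numbers): a RAID6
 * array with raid_disks == 6, max_degraded == 2, group_cnt == 0 and
 * 100 stripes in the journal needs at most
 * 100 * 3 + 1 * 4 == 304 pages, i.e. 304 * BLOCK_SECTORS == 2432
 * sectors of log space to flush the whole cache.
 */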

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
        struct r5conf *conf = log->rdev->mddev->private;
        sector_t free_space;
        sector_t reclaim_space;
        bool wake_reclaim = false;

        if (!r5c_is_writeback(log))
                return;

        free_space = r5l_ring_distance(log, log->log_start,
                                       log->last_checkpoint);
        reclaim_space = r5c_log_required_to_flush_cache(conf);
        if (free_space < 2 * reclaim_space)
                set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
        else {
                if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
                        wake_reclaim = true;
                clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
        }
        if (free_space < 3 * reclaim_space)
                set_bit(R5C_LOG_TIGHT, &conf->cache_state);
        else
                clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

        if (wake_reclaim)
                r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
        struct r5conf *conf = sh->raid_conf;
        struct r5l_log *log = conf->log;

        BUG_ON(!r5c_is_writeback(log));

        WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
        clear_bit(STRIPE_R5C_CACHING, &sh->state);

        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
        int i;

        for (i = sh->disks; i--; )
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
                        set_bit(R5_InJournal, &sh->dev[i].flags);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                }
        clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
        int i;

        for (i = sh->disks; i--; )
                if (test_bit(R5_InJournal, &sh->dev[i].flags))
                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
        struct r5l_log *log = sh->raid_conf->log;

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
                BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
                /*
                 * Set R5_InJournal for parity dev[pd_idx]. This means
                 * all data AND parity in the journal. For RAID 6, it is
                 * NOT necessary to set the flag for dev[qd_idx], as the
                 * two parities are written out together.
                 */
                set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
        } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
                r5c_handle_data_cached(sh);
        } else {
                r5c_handle_parity_cached(sh);
                set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
        }
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
        struct stripe_head *sh, *next;

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);

                r5c_finish_cache_stripe(sh);

                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_IO_END)
                        break;

                list_move_tail(&io->log_sibling, &log->finished_ios);
                r5l_io_run_stripes(io);
        }
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_IO_END)
                        break;
                list_move_tail(&io->log_sibling, &log->io_end_ios);
        }
}
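
/*
 * Illustration of the in-order completion above (hypothetical sequence):
 * if running_ios holds io_units A, B, C in submission order and B's bio
 * completes before A's, both loops stop at A (state < IO_UNIT_IO_END),
 * so B is only moved on once A completes. This keeps io_units strictly
 * in log order, which recovery relies on.
 */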

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
        struct r5l_io_unit *io = bio->bi_private;
        struct r5l_io_unit *io_deferred;
        struct r5l_log *log = io->log;
        unsigned long flags;
        bool has_null_flush;
        bool has_flush_payload;

        if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);

        bio_put(bio);
        mempool_free(io->meta_page, log->meta_pool);

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_END);

        /*
         * if the io doesn't have null_flush or flush payload,
         * it is not safe to access it after releasing io_list_lock.
         * Therefore, it is necessary to check the condition with
         * the lock held.
         */
        has_null_flush = io->has_null_flush;
        has_flush_payload = io->has_flush_payload;

        if (log->need_cache_flush && !list_empty(&io->stripe_list))
                r5l_move_to_end_ios(log);
        else
                r5l_log_run_stripes(log);
        if (!list_empty(&log->running_ios)) {
                /*
                 * FLUSH/FUA io_unit is deferred because of ordering, now we
                 * can dispatch it
                 */
                io_deferred = list_first_entry(&log->running_ios,
                                               struct r5l_io_unit, log_sibling);
                if (io_deferred->io_deferred)
                        schedule_work(&log->deferred_io_work);
        }

        spin_unlock_irqrestore(&log->io_list_lock, flags);

        if (log->need_cache_flush)
                md_wakeup_thread(log->rdev->mddev->thread);

        /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
        if (has_null_flush) {
                struct bio *bi;

                WARN_ON(bio_list_empty(&io->flush_barriers));
                while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
                        bio_endio(bi);
                        if (atomic_dec_and_test(&io->pending_stripe)) {
                                __r5l_stripe_write_finished(io);
                                return;
                        }
                }
        }
        /* decrease pending_stripe for flush payload */
        if (has_flush_payload)
                if (atomic_dec_and_test(&io->pending_stripe))
                        __r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
        spin_unlock_irqrestore(&log->io_list_lock, flags);

        /*
         * In case of journal device failures, submit_bio will get an error
         * and call endio, then active stripes will continue the write
         * process. Therefore, it is not necessary to check the Faulty bit
         * of the journal device here.
         *
         * We can't check split_bio after current_bio is submitted. If
         * io->split_bio is null, after current_bio is submitted, current_bio
         * might already be completed and the io_unit is freed. We submit
         * split_bio first to avoid the issue.
         */
        if (io->split_bio) {
                if (io->has_flush)
                        io->split_bio->bi_opf |= REQ_PREFLUSH;
                if (io->has_fua)
                        io->split_bio->bi_opf |= REQ_FUA;
                submit_bio(io->split_bio);
        }

        if (io->has_flush)
                io->current_bio->bi_opf |= REQ_PREFLUSH;
        if (io->has_fua)
                io->current_bio->bi_opf |= REQ_FUA;
        submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
        struct r5l_log *log = container_of(work, struct r5l_log,
                                           deferred_io_work);
        struct r5l_io_unit *io = NULL;
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        if (!list_empty(&log->running_ios)) {
                io = list_first_entry(&log->running_ios, struct r5l_io_unit,
                                      log_sibling);
                if (!io->io_deferred)
                        io = NULL;
                else
                        io->io_deferred = 0;
        }
        spin_unlock_irqrestore(&log->io_list_lock, flags);
        if (io)
                r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
        struct r5l_log *log = container_of(work, struct r5l_log,
                                           disable_writeback_work);
        struct mddev *mddev = log->rdev->mddev;

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
                return;
        pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
                mdname(mddev));

        /* wait superblock change before suspend */
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

        mddev_suspend(mddev);
        log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
        mddev_resume(mddev);
}
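
/*
 * Illustration of the deferral protocol (hypothetical sequence): if a
 * plain io_unit A is in flight and a new io_unit B carries FLUSH/FUA,
 * r5l_submit_current_io() below marks B io_deferred instead of
 * submitting it, because B is not the first entry in running_ios. When
 * A's bio completes, r5l_log_endio() schedules deferred_io_work and
 * r5l_submit_io_async() finally dispatches B, preserving the ordering
 * the flush semantics require.
 */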

static void r5l_submit_current_io(struct r5l_log *log)
{
        struct r5l_io_unit *io = log->current_io;
        struct bio *bio;
        struct r5l_meta_block *block;
        unsigned long flags;
        u32 crc;
        bool do_submit = true;

        if (!io)
                return;

        block = page_address(io->meta_page);
        block->meta_size = cpu_to_le32(io->meta_offset);
        crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
        block->checksum = cpu_to_le32(crc);
        bio = io->current_bio;

        log->current_io = NULL;
        spin_lock_irqsave(&log->io_list_lock, flags);
        if (io->has_flush || io->has_fua) {
                if (io != list_first_entry(&log->running_ios,
                                           struct r5l_io_unit, log_sibling)) {
                        io->io_deferred = 1;
                        do_submit = false;
                }
        }
        spin_unlock_irqrestore(&log->io_list_lock, flags);
        if (do_submit)
                r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

        return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
        log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

        r5c_update_log_state(log);
        /*
         * If we filled up the log device, start from the beginning again,
         * which will require a new bio.
         *
         * Note: for this to work properly the log size needs to be a multiple
         * of BLOCK_SECTORS.
         */
        if (log->log_start == 0)
                io->need_split_bio = true;

        io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        struct r5l_meta_block *block;

        io = mempool_alloc(log->io_pool, GFP_ATOMIC);
        if (!io)
                return NULL;
        memset(io, 0, sizeof(*io));

        io->log = log;
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        bio_list_init(&io->flush_barriers);
        io->state = IO_UNIT_RUNNING;

        io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
        block = page_address(io->meta_page);
        clear_page(block);
        block->magic = cpu_to_le32(R5LOG_MAGIC);
        block->version = R5LOG_VERSION;
        block->seq = cpu_to_le64(log->seq);
        block->position = cpu_to_le64(log->log_start);

        io->log_start = log->log_start;
        io->meta_offset = sizeof(struct r5l_meta_block);
        io->seq = log->seq++;

        io->current_bio = r5l_bio_alloc(log);
        io->current_bio->bi_end_io = r5l_log_endio;
        io->current_bio->bi_private = io;
        bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

        r5_reserve_log_entry(log, io);

        spin_lock_irq(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->running_ios);
        spin_unlock_irq(&log->io_list_lock);

        return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
        if (log->current_io &&
            log->current_io->meta_offset + payload_size > PAGE_SIZE)
                r5l_submit_current_io(log);

        if (!log->current_io) {
                log->current_io = r5l_new_meta(log);
                if (!log->current_io)
                        return -ENOMEM;
        }

        return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
                                    sector_t location,
                                    u32 checksum1, u32 checksum2,
                                    bool checksum2_valid)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_payload_data_parity *payload;

        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(type);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
                                    (PAGE_SHIFT - 9));
        payload->location = cpu_to_le64(location);
        payload->checksum[0] = cpu_to_le32(checksum1);
        if (checksum2_valid)
                payload->checksum[1] = cpu_to_le32(checksum2);

        io->meta_offset += sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
        struct r5l_io_unit *io = log->current_io;

        if (io->need_split_bio) {
                BUG_ON(io->split_bio);
                io->split_bio = io->current_bio;
                io->current_bio = r5l_bio_alloc(log);
                bio_chain(io->current_bio, io->split_bio);
                io->need_split_bio = false;
        }

        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
                BUG();

        r5_reserve_log_entry(log, io);
}
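
/*
 * Payload size arithmetic for reference (follows from the code above,
 * assuming PAGE_SIZE == 4096): payload->size is recorded in sectors, so
 * a single-page payload is 1 << (PAGE_SHIFT - 9) == 8 sectors, and a
 * two-page payload (checksum2_valid, i.e. P and Q parity) is 16. The
 * in-memory meta_offset, by contrast, advances in bytes: the header
 * plus one or two __le32 checksums.
 */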

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
        struct mddev *mddev = log->rdev->mddev;
        struct r5conf *conf = mddev->private;
        struct r5l_io_unit *io;
        struct r5l_payload_flush *payload;
        int meta_size;

        /*
         * payload_flush requires extra writes to the journal.
         * To avoid handling the extra IO in quiesce, just skip
         * flush_payload
         */
        if (conf->quiesce)
                return;

        mutex_lock(&log->io_mutex);
        meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

        if (r5l_get_meta(log, meta_size)) {
                mutex_unlock(&log->io_mutex);
                return;
        }

        /* current implementation is one stripe per flush payload */
        io = log->current_io;
        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32(sizeof(__le64));
        payload->flush_stripes[0] = cpu_to_le64(sect);
        io->meta_offset += meta_size;
        /* multiple flush payloads count as one pending_stripe */
        if (!io->has_flush_payload) {
                io->has_flush_payload = 1;
                atomic_inc(&io->pending_stripe);
        }
        mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                          int data_pages, int parity_pages)
{
        int i;
        int meta_size;
        int ret;
        struct r5l_io_unit *io;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;

        ret = r5l_get_meta(log, meta_size);
        if (ret)
                return ret;

        io = log->current_io;

        if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
                io->has_flush = 1;

        for (i = 0; i < sh->disks; i++) {
                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
                    test_bit(R5_InJournal, &sh->dev[i].flags))
                        continue;
                if (i == sh->pd_idx || i == sh->qd_idx)
                        continue;
                if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
                    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
                        io->has_fua = 1;
                        /*
                         * we need to flush journal to make sure recovery can
                         * reach the data with fua flag
                         */
                        io->has_flush = 1;
                }
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                        raid5_compute_blocknr(sh, i, 0),
                                        sh->dev[i].log_checksum, 0, false);
                r5l_append_payload_page(log, sh->dev[i].page);
        }

        if (parity_pages == 2) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        sh->dev[sh->qd_idx].log_checksum, true);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
                r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
        } else if (parity_pages == 1) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        0, false);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
        } else /* Just writing data, not parity, in caching phase */
                BUG_ON(parity_pages != 0);

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripe);
        sh->log_io = io;

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
                return 0;

        if (sh->log_start == MaxSector) {
                BUG_ON(!list_empty(&sh->r5c));
                sh->log_start = io->log_start;
                spin_lock_irq(&log->stripe_in_journal_lock);
                list_add_tail(&sh->r5c,
                              &log->stripe_in_journal_list);
                spin_unlock_irq(&log->stripe_in_journal_lock);
                atomic_inc(&log->stripe_in_journal_count);
        }
        return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
                                           struct stripe_head *sh)
{
        spin_lock(&log->no_space_stripes_lock);
        list_add_tail(&sh->log_list, &log->no_space_stripes);
        spin_unlock(&log->no_space_stripes_lock);
}
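
/*
 * Space accounting example for r5l_write_stripe() below (illustrative
 * numbers): a full-stripe write on a 6-disk RAID6 has write_disks == 6
 * (4 data + 2 parity pages), so it reserves
 * (1 + 6) << (PAGE_SHIFT - 9) == 56 sectors: one meta block plus six
 * 4kB pages, each BLOCK_SECTORS == 8 sectors long.
 */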

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
        struct r5conf *conf = sh->raid_conf;
        int write_disks = 0;
        int data_pages, parity_pages;
        int reserve;
        int i;
        int ret = 0;
        bool wake_reclaim = false;

        if (!log)
                return -EAGAIN;
        /* Don't support stripe batch */
        if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            test_bit(STRIPE_SYNCING, &sh->state)) {
                /* the stripe is written to log, we start writing it to raid */
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

        for (i = 0; i < sh->disks; i++) {
                void *addr;

                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
                    test_bit(R5_InJournal, &sh->dev[i].flags))
                        continue;

                write_disks++;
                /* checksum is already calculated in last run */
                if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                        continue;
                addr = kmap_atomic(sh->dev[i].page);
                sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                    addr, PAGE_SIZE);
                kunmap_atomic(addr);
        }
        parity_pages = 1 + !!(sh->qd_idx >= 0);
        data_pages = write_disks - parity_pages;

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        /*
         * The stripe must enter state machine again to finish the write, so
         * don't delay.
         */
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        mutex_lock(&log->io_mutex);
        /* meta + data */
        reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
                if (!r5l_has_free_space(log, reserve)) {
                        r5l_add_no_space_stripe(log, sh);
                        wake_reclaim = true;
                } else {
                        ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
                        if (ret) {
                                spin_lock_irq(&log->io_list_lock);
                                list_add_tail(&sh->log_list,
                                              &log->no_mem_stripes);
                                spin_unlock_irq(&log->io_list_lock);
                        }
                }
        } else {        /* R5C_JOURNAL_MODE_WRITE_BACK */
                /*
                 * log space critical, do not process stripes that are
                 * not in cache yet (sh->log_start == MaxSector).
                 */
                if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
                    sh->log_start == MaxSector) {
                        r5l_add_no_space_stripe(log, sh);
                        wake_reclaim = true;
                        reserve = 0;
                } else if (!r5l_has_free_space(log, reserve)) {
                        if (sh->log_start == log->last_checkpoint)
                                BUG();
                        else
                                r5l_add_no_space_stripe(log, sh);
                } else {
                        ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
                        if (ret) {
                                spin_lock_irq(&log->io_list_lock);
                                list_add_tail(&sh->log_list,
                                              &log->no_mem_stripes);
                                spin_unlock_irq(&log->io_list_lock);
                        }
                }
        }

        mutex_unlock(&log->io_mutex);
        if (wake_reclaim)
                r5l_wake_reclaim(log, reserve);
        return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
        if (!log)
                return;
        mutex_lock(&log->io_mutex);
        r5l_submit_current_io(log);
        mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
        if (!log)
                return -ENODEV;

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
                /*
                 * in write through (journal only)
                 * we flush log disk cache first, then write stripe data to
                 * raid disks. So if bio is finished, the log disk cache is
                 * flushed already. The recovery guarantees we can recover
                 * the bio from log disk, so we don't need to flush again
                 */
                if (bio->bi_iter.bi_size == 0) {
                        bio_endio(bio);
                        return 0;
                }
                bio->bi_opf &= ~REQ_PREFLUSH;
        } else {
                /* write back (with cache) */
                if (bio->bi_iter.bi_size == 0) {
                        mutex_lock(&log->io_mutex);
                        r5l_get_meta(log, 0);
                        bio_list_add(&log->current_io->flush_barriers, bio);
                        log->current_io->has_flush = 1;
                        log->current_io->has_null_flush = 1;
                        atomic_inc(&log->current_io->pending_stripe);
                        r5l_submit_current_io(log);
                        mutex_unlock(&log->io_mutex);
                        return 0;
                }
        }
        return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
        struct stripe_head *sh;

        spin_lock(&log->no_space_stripes_lock);
        while (!list_empty(&log->no_space_stripes)) {
                sh = list_first_entry(&log->no_space_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
        struct stripe_head *sh;
        struct r5l_log *log = conf->log;
        sector_t new_cp;
        unsigned long flags;

        if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
                return log->next_checkpoint;

        spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
        if (list_empty(&conf->log->stripe_in_journal_list)) {
                /* all stripes flushed */
                spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
                return log->next_checkpoint;
        }
        sh = list_first_entry(&conf->log->stripe_in_journal_list,
                              struct stripe_head, r5c);
        new_cp = sh->log_start;
        spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
        return new_cp;
}
log->rdev->mddev->private; 1188a39f7afdSSong Liu 118917036461SChristoph Hellwig return r5l_ring_distance(log, log->last_checkpoint, 1190a39f7afdSSong Liu r5c_calculate_new_cp(conf)); 119117036461SChristoph Hellwig } 119217036461SChristoph Hellwig 11935036c390SChristoph Hellwig static void r5l_run_no_mem_stripe(struct r5l_log *log) 11945036c390SChristoph Hellwig { 11955036c390SChristoph Hellwig struct stripe_head *sh; 11965036c390SChristoph Hellwig 11975036c390SChristoph Hellwig assert_spin_locked(&log->io_list_lock); 11985036c390SChristoph Hellwig 11995036c390SChristoph Hellwig if (!list_empty(&log->no_mem_stripes)) { 12005036c390SChristoph Hellwig sh = list_first_entry(&log->no_mem_stripes, 12015036c390SChristoph Hellwig struct stripe_head, log_list); 12025036c390SChristoph Hellwig list_del_init(&sh->log_list); 12035036c390SChristoph Hellwig set_bit(STRIPE_HANDLE, &sh->state); 12045036c390SChristoph Hellwig raid5_release_stripe(sh); 12055036c390SChristoph Hellwig } 12065036c390SChristoph Hellwig } 12075036c390SChristoph Hellwig 120804732f74SChristoph Hellwig static bool r5l_complete_finished_ios(struct r5l_log *log) 120917036461SChristoph Hellwig { 121017036461SChristoph Hellwig struct r5l_io_unit *io, *next; 121117036461SChristoph Hellwig bool found = false; 121217036461SChristoph Hellwig 121317036461SChristoph Hellwig assert_spin_locked(&log->io_list_lock); 121417036461SChristoph Hellwig 121504732f74SChristoph Hellwig list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { 121617036461SChristoph Hellwig /* don't change list order */ 121717036461SChristoph Hellwig if (io->state < IO_UNIT_STRIPE_END) 121817036461SChristoph Hellwig break; 121917036461SChristoph Hellwig 122017036461SChristoph Hellwig log->next_checkpoint = io->log_start; 122117036461SChristoph Hellwig 122217036461SChristoph Hellwig list_del(&io->log_sibling); 12235036c390SChristoph Hellwig mempool_free(io, log->io_pool); 12245036c390SChristoph Hellwig r5l_run_no_mem_stripe(log); 122517036461SChristoph Hellwig 122617036461SChristoph Hellwig found = true; 122717036461SChristoph Hellwig } 122817036461SChristoph Hellwig 122917036461SChristoph Hellwig return found; 123017036461SChristoph Hellwig } 123117036461SChristoph Hellwig 1232509ffec7SChristoph Hellwig static void __r5l_stripe_write_finished(struct r5l_io_unit *io) 1233509ffec7SChristoph Hellwig { 1234509ffec7SChristoph Hellwig struct r5l_log *log = io->log; 1235a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private; 1236509ffec7SChristoph Hellwig unsigned long flags; 1237509ffec7SChristoph Hellwig 1238509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags); 1239509ffec7SChristoph Hellwig __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); 124017036461SChristoph Hellwig 124104732f74SChristoph Hellwig if (!r5l_complete_finished_ios(log)) { 124285f2f9a4SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags); 124385f2f9a4SShaohua Li return; 124485f2f9a4SShaohua Li } 1245509ffec7SChristoph Hellwig 1246a39f7afdSSong Liu if (r5l_reclaimable_space(log) > log->max_free_space || 1247a39f7afdSSong Liu test_bit(R5C_LOG_TIGHT, &conf->cache_state)) 1248509ffec7SChristoph Hellwig r5l_wake_reclaim(log, 0); 1249509ffec7SChristoph Hellwig 1250509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags); 1251509ffec7SChristoph Hellwig wake_up(&log->iounit_wait); 1252509ffec7SChristoph Hellwig } 1253509ffec7SChristoph Hellwig 12540576b1c6SShaohua Li void r5l_stripe_write_finished(struct stripe_head *sh) 12550576b1c6SShaohua Li { 
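	/*
	 * Called once per stripe after its data/parity have reached the raid
	 * disks; the call that drops pending_stripe to zero retires the whole
	 * io_unit. sh->log_io may be NULL for stripes that never went through
	 * the journal.
	 */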
12560576b1c6SShaohua Li 	struct r5l_io_unit *io;
12570576b1c6SShaohua Li 
12580576b1c6SShaohua Li 	io = sh->log_io;
12590576b1c6SShaohua Li 	sh->log_io = NULL;
12600576b1c6SShaohua Li 
1261509ffec7SChristoph Hellwig 	if (io && atomic_dec_and_test(&io->pending_stripe))
1262509ffec7SChristoph Hellwig 		__r5l_stripe_write_finished(io);
12630576b1c6SShaohua Li }
12640576b1c6SShaohua Li 
1265a8c34f91SShaohua Li static void r5l_log_flush_endio(struct bio *bio)
1266a8c34f91SShaohua Li {
1267a8c34f91SShaohua Li 	struct r5l_log *log = container_of(bio, struct r5l_log,
1268a8c34f91SShaohua Li 					   flush_bio);
1269a8c34f91SShaohua Li 	unsigned long flags;
1270a8c34f91SShaohua Li 	struct r5l_io_unit *io;
1271a8c34f91SShaohua Li 
12724e4cbee9SChristoph Hellwig 	if (bio->bi_status)
12736e74a9cfSShaohua Li 		md_error(log->rdev->mddev, log->rdev);
12746e74a9cfSShaohua Li 
1275a8c34f91SShaohua Li 	spin_lock_irqsave(&log->io_list_lock, flags);
1276d8858f43SChristoph Hellwig 	list_for_each_entry(io, &log->flushing_ios, log_sibling)
1277d8858f43SChristoph Hellwig 		r5l_io_run_stripes(io);
127804732f74SChristoph Hellwig 	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1279a8c34f91SShaohua Li 	spin_unlock_irqrestore(&log->io_list_lock, flags);
1280a8c34f91SShaohua Li }
1281a8c34f91SShaohua Li 
12820576b1c6SShaohua Li /*
12830576b1c6SShaohua Li  * Start dispatching IO to the raid disks.
12840576b1c6SShaohua Li  * An io_unit (meta) covers a slice of the log. There is one situation we
12850576b1c6SShaohua Li  * want to avoid: a broken meta in the middle of the log leaves recovery
12860576b1c6SShaohua Li  * unable to find the meta at the head of the log. If an operation requires
12870576b1c6SShaohua Li  * the meta at the head to be persistent in the log, we must make sure every
12880576b1c6SShaohua Li  * meta before it is persistent in the log too. A case is:
12890576b1c6SShaohua Li  *
12900576b1c6SShaohua Li  * stripe data/parity is in the log and we start writing the stripe to the
12910576b1c6SShaohua Li  * raid disks; the stripe data/parity must be persistent in the log before
12920576b1c6SShaohua Li  * we do the write to the raid disks. The solution is to strictly maintain
12930576b1c6SShaohua Li  * the io_unit list order: stripes of an io_unit are only written to the
12940576b1c6SShaohua Li  * raid disks once that io_unit is the first one whose data/parity is in the log.
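 *
 * For example (illustrative): if io_units 1, 2 and 3 are on the list and
 * unit 2's log write has not completed yet, stripes of unit 3 must not be
 * dispatched to the raid disks even though unit 3 itself is persistent;
 * otherwise a crash could leave a broken meta 2 while the raid disks
 * already hold unit 3's data, which a recovery stopping at meta 2 would
 * never see.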
12950576b1c6SShaohua Li  */
12960576b1c6SShaohua Li void r5l_flush_stripe_to_raid(struct r5l_log *log)
12970576b1c6SShaohua Li {
1298a8c34f91SShaohua Li 	bool do_flush;
129956fef7c6SChristoph Hellwig 
130056fef7c6SChristoph Hellwig 	if (!log || !log->need_cache_flush)
13010576b1c6SShaohua Li 		return;
13020576b1c6SShaohua Li 
1303a8c34f91SShaohua Li 	spin_lock_irq(&log->io_list_lock);
1304a8c34f91SShaohua Li 	/* flush bio is running */
1305a8c34f91SShaohua Li 	if (!list_empty(&log->flushing_ios)) {
1306a8c34f91SShaohua Li 		spin_unlock_irq(&log->io_list_lock);
13070576b1c6SShaohua Li 		return;
13080576b1c6SShaohua Li 	}
1309a8c34f91SShaohua Li 	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1310a8c34f91SShaohua Li 	do_flush = !list_empty(&log->flushing_ios);
13110576b1c6SShaohua Li 	spin_unlock_irq(&log->io_list_lock);
1312a8c34f91SShaohua Li 
1313a8c34f91SShaohua Li 	if (!do_flush)
1314a8c34f91SShaohua Li 		return;
1315a8c34f91SShaohua Li 	bio_reset(&log->flush_bio);
131674d46992SChristoph Hellwig 	bio_set_dev(&log->flush_bio, log->rdev->bdev);
1317a8c34f91SShaohua Li 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
131870fd7614SChristoph Hellwig 	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
13194e49ea4aSMike Christie 	submit_bio(&log->flush_bio);
13200576b1c6SShaohua Li }
13210576b1c6SShaohua Li 
13220576b1c6SShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp);
13234b482044SShaohua Li static void r5l_write_super_and_discard_space(struct r5l_log *log,
13244b482044SShaohua Li 					      sector_t end)
13254b482044SShaohua Li {
13264b482044SShaohua Li 	struct block_device *bdev = log->rdev->bdev;
13274b482044SShaohua Li 	struct mddev *mddev;
13284b482044SShaohua Li 
13294b482044SShaohua Li 	r5l_write_super(log, end);
13304b482044SShaohua Li 
13314b482044SShaohua Li 	if (!blk_queue_discard(bdev_get_queue(bdev)))
13324b482044SShaohua Li 		return;
13334b482044SShaohua Li 
13344b482044SShaohua Li 	mddev = log->rdev->mddev;
13354b482044SShaohua Li 	/*
13368e018c21SShaohua Li 	 * Discard could zero data, so before discard we must make sure the
13378e018c21SShaohua Li 	 * superblock is updated to the new log tail. Updating the superblock
13388e018c21SShaohua Li 	 * (either by calling md_update_sb() directly or depending on the md
13398e018c21SShaohua Li 	 * thread) must hold the reconfig mutex. On the other hand,
13408e018c21SShaohua Li 	 * raid5_quiesce is called with reconfig_mutex held. The first step of
13418e018c21SShaohua Li 	 * raid5_quiesce() is to wait for all IO to finish, hence to wait for
13428e018c21SShaohua Li 	 * the reclaim thread, while the reclaim thread is calling this
13438e018c21SShaohua Li 	 * function and waiting for the reconfig mutex. So there is a deadlock. We work around this issue with a trylock.
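	 * The cycle, spelled out (illustrative):
	 *	reclaim thread:  runs r5l_write_super_and_discard_space()
	 *			 -> wants reconfig_mutex
	 *	raid5_quiesce(): holds reconfig_mutex
	 *			 -> waits for all IO, i.e. for the reclaim thread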
13448e018c21SShaohua Li * FIXME: we could miss discard if we can't take reconfig mutex 13454b482044SShaohua Li */ 13462953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0, 13472953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 13488e018c21SShaohua Li if (!mddev_trylock(mddev)) 13498e018c21SShaohua Li return; 13504b482044SShaohua Li md_update_sb(mddev, 1); 13518e018c21SShaohua Li mddev_unlock(mddev); 13524b482044SShaohua Li 13536e74a9cfSShaohua Li /* discard IO error really doesn't matter, ignore it */ 13544b482044SShaohua Li if (log->last_checkpoint < end) { 13554b482044SShaohua Li blkdev_issue_discard(bdev, 13564b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset, 13574b482044SShaohua Li end - log->last_checkpoint, GFP_NOIO, 0); 13584b482044SShaohua Li } else { 13594b482044SShaohua Li blkdev_issue_discard(bdev, 13604b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset, 13614b482044SShaohua Li log->device_size - log->last_checkpoint, 13624b482044SShaohua Li GFP_NOIO, 0); 13634b482044SShaohua Li blkdev_issue_discard(bdev, log->rdev->data_offset, end, 13644b482044SShaohua Li GFP_NOIO, 0); 13654b482044SShaohua Li } 13664b482044SShaohua Li } 13674b482044SShaohua Li 1368a39f7afdSSong Liu /* 1369a39f7afdSSong Liu * r5c_flush_stripe moves stripe from cached list to handle_list. When called, 1370a39f7afdSSong Liu * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes. 1371a39f7afdSSong Liu * 1372a39f7afdSSong Liu * must hold conf->device_lock 1373a39f7afdSSong Liu */ 1374a39f7afdSSong Liu static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) 1375a39f7afdSSong Liu { 1376a39f7afdSSong Liu BUG_ON(list_empty(&sh->lru)); 1377a39f7afdSSong Liu BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); 1378a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 1379a39f7afdSSong Liu 1380a39f7afdSSong Liu /* 1381a39f7afdSSong Liu * The stripe is not ON_RELEASE_LIST, so it is safe to call 1382a39f7afdSSong Liu * raid5_release_stripe() while holding conf->device_lock 1383a39f7afdSSong Liu */ 1384a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); 1385a39f7afdSSong Liu assert_spin_locked(&conf->device_lock); 1386a39f7afdSSong Liu 1387a39f7afdSSong Liu list_del_init(&sh->lru); 1388a39f7afdSSong Liu atomic_inc(&sh->count); 1389a39f7afdSSong Liu 1390a39f7afdSSong Liu set_bit(STRIPE_HANDLE, &sh->state); 1391a39f7afdSSong Liu atomic_inc(&conf->active_stripes); 1392a39f7afdSSong Liu r5c_make_stripe_write_out(sh); 1393a39f7afdSSong Liu 1394e33fbb9cSShaohua Li if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) 1395e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_partial_stripes); 1396e33fbb9cSShaohua Li else 1397e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_full_stripes); 1398a39f7afdSSong Liu raid5_release_stripe(sh); 1399a39f7afdSSong Liu } 1400a39f7afdSSong Liu 1401a39f7afdSSong Liu /* 1402a39f7afdSSong Liu * if num == 0, flush all full stripes 1403a39f7afdSSong Liu * if num > 0, flush all full stripes. If less than num full stripes are 1404a39f7afdSSong Liu * flushed, flush some partial stripes until totally num stripes are 1405a39f7afdSSong Liu * flushed or there is no more cached stripes. 
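 *
 * Worked example (illustrative): with num == 8 and 5 stripes on the full
 * list, all 5 full stripes are flushed first; count is then still below
 * num, so up to 3 partial stripes are flushed as well, stopping early if
 * the partial list runs out.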
1406a39f7afdSSong Liu  */
1407a39f7afdSSong Liu void r5c_flush_cache(struct r5conf *conf, int num)
1408a39f7afdSSong Liu {
1409a39f7afdSSong Liu 	int count;
1410a39f7afdSSong Liu 	struct stripe_head *sh, *next;
1411a39f7afdSSong Liu 
1412a39f7afdSSong Liu 	assert_spin_locked(&conf->device_lock);
1413a39f7afdSSong Liu 	if (!conf->log)
1414a39f7afdSSong Liu 		return;
1415a39f7afdSSong Liu 
1416a39f7afdSSong Liu 	count = 0;
1417a39f7afdSSong Liu 	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1418a39f7afdSSong Liu 		r5c_flush_stripe(conf, sh);
1419a39f7afdSSong Liu 		count++;
1420a39f7afdSSong Liu 	}
1421a39f7afdSSong Liu 
1422a39f7afdSSong Liu 	if (count >= num)
1423a39f7afdSSong Liu 		return;
1424a39f7afdSSong Liu 	list_for_each_entry_safe(sh, next,
1425a39f7afdSSong Liu 				 &conf->r5c_partial_stripe_list, lru) {
1426a39f7afdSSong Liu 		r5c_flush_stripe(conf, sh);
1427a39f7afdSSong Liu 		if (++count >= num)
1428a39f7afdSSong Liu 			break;
1429a39f7afdSSong Liu 	}
1430a39f7afdSSong Liu }
1431a39f7afdSSong Liu 
1432a39f7afdSSong Liu static void r5c_do_reclaim(struct r5conf *conf)
1433a39f7afdSSong Liu {
1434a39f7afdSSong Liu 	struct r5l_log *log = conf->log;
1435a39f7afdSSong Liu 	struct stripe_head *sh;
1436a39f7afdSSong Liu 	int count = 0;
1437a39f7afdSSong Liu 	unsigned long flags;
1438a39f7afdSSong Liu 	int total_cached;
1439a39f7afdSSong Liu 	int stripes_to_flush;
1440e33fbb9cSShaohua Li 	int flushing_partial, flushing_full;
1441a39f7afdSSong Liu 
1442a39f7afdSSong Liu 	if (!r5c_is_writeback(log))
1443a39f7afdSSong Liu 		return;
1444a39f7afdSSong Liu 
1445e33fbb9cSShaohua Li 	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1446e33fbb9cSShaohua Li 	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1447a39f7afdSSong Liu 	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1448e33fbb9cSShaohua Li 		atomic_read(&conf->r5c_cached_full_stripes) -
1449e33fbb9cSShaohua Li 		flushing_full - flushing_partial;
1450a39f7afdSSong Liu 
1451a39f7afdSSong Liu 	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1452a39f7afdSSong Liu 	    atomic_read(&conf->empty_inactive_list_nr) > 0)
1453a39f7afdSSong Liu 		/*
1454a39f7afdSSong Liu 		 * if stripe cache pressure is high, flush all full stripes
1455a39f7afdSSong Liu 		 * and some partial stripes
1456a39f7afdSSong Liu 		 */
1457a39f7afdSSong Liu 		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1458a39f7afdSSong Liu 	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1459e33fbb9cSShaohua Li 		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
146084890c03SShaohua Li 		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1461a39f7afdSSong Liu 		/*
1462a39f7afdSSong Liu 		 * if stripe cache pressure is moderate, or if there are many
1463a39f7afdSSong Liu 		 * full stripes, flush all full stripes
1464a39f7afdSSong Liu 		 */
1465a39f7afdSSong Liu 		stripes_to_flush = 0;
1466a39f7afdSSong Liu 	else
1467a39f7afdSSong Liu 		/* no need to flush */
1468a39f7afdSSong Liu 		stripes_to_flush = -1;
1469a39f7afdSSong Liu 
1470a39f7afdSSong Liu 	if (stripes_to_flush >= 0) {
1471a39f7afdSSong Liu 		spin_lock_irqsave(&conf->device_lock, flags);
1472a39f7afdSSong Liu 		r5c_flush_cache(conf, stripes_to_flush);
1473a39f7afdSSong Liu 		spin_unlock_irqrestore(&conf->device_lock, flags);
1474a39f7afdSSong Liu 	}
1475a39f7afdSSong Liu 
1476a39f7afdSSong Liu 	/* if log space is tight, flush stripes on stripe_in_journal_list */
1477a39f7afdSSong Liu 	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1478a39f7afdSSong Liu 		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1479a39f7afdSSong Liu 		spin_lock(&conf->device_lock);
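		/*
		 * Lock nesting here: stripe_in_journal_lock is the outer
		 * lock, device_lock the inner one; r5c_flush_stripe()
		 * below requires device_lock to be held.
		 */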
1480a39f7afdSSong Liu list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { 1481a39f7afdSSong Liu /* 1482a39f7afdSSong Liu * stripes on stripe_in_journal_list could be in any 1483a39f7afdSSong Liu * state of the stripe_cache state machine. In this 1484a39f7afdSSong Liu * case, we only want to flush stripe on 1485a39f7afdSSong Liu * r5c_cached_full/partial_stripes. The following 1486a39f7afdSSong Liu * condition makes sure the stripe is on one of the 1487a39f7afdSSong Liu * two lists. 1488a39f7afdSSong Liu */ 1489a39f7afdSSong Liu if (!list_empty(&sh->lru) && 1490a39f7afdSSong Liu !test_bit(STRIPE_HANDLE, &sh->state) && 1491a39f7afdSSong Liu atomic_read(&sh->count) == 0) { 1492a39f7afdSSong Liu r5c_flush_stripe(conf, sh); 1493a39f7afdSSong Liu if (count++ >= R5C_RECLAIM_STRIPE_GROUP) 1494a39f7afdSSong Liu break; 1495a39f7afdSSong Liu } 1496e8fd52eeSShaohua Li } 1497a39f7afdSSong Liu spin_unlock(&conf->device_lock); 1498a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); 1499a39f7afdSSong Liu } 1500f687a33eSSong Liu 1501f687a33eSSong Liu if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) 1502f687a33eSSong Liu r5l_run_no_space_stripes(log); 1503f687a33eSSong Liu 1504a39f7afdSSong Liu md_wakeup_thread(conf->mddev->thread); 1505a39f7afdSSong Liu } 15064b482044SShaohua Li 15070576b1c6SShaohua Li static void r5l_do_reclaim(struct r5l_log *log) 15080576b1c6SShaohua Li { 1509a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private; 15100576b1c6SShaohua Li sector_t reclaim_target = xchg(&log->reclaim_target, 0); 151117036461SChristoph Hellwig sector_t reclaimable; 151217036461SChristoph Hellwig sector_t next_checkpoint; 1513a39f7afdSSong Liu bool write_super; 15140576b1c6SShaohua Li 15150576b1c6SShaohua Li spin_lock_irq(&log->io_list_lock); 1516a39f7afdSSong Liu write_super = r5l_reclaimable_space(log) > log->max_free_space || 1517a39f7afdSSong Liu reclaim_target != 0 || !list_empty(&log->no_space_stripes); 15180576b1c6SShaohua Li /* 15190576b1c6SShaohua Li * move proper io_unit to reclaim list. We should not change the order. 15200576b1c6SShaohua Li * reclaimable/unreclaimable io_unit can be mixed in the list, we 15210576b1c6SShaohua Li * shouldn't reuse space of an unreclaimable io_unit 15220576b1c6SShaohua Li */ 15230576b1c6SShaohua Li while (1) { 152417036461SChristoph Hellwig reclaimable = r5l_reclaimable_space(log); 152517036461SChristoph Hellwig if (reclaimable >= reclaim_target || 15260576b1c6SShaohua Li (list_empty(&log->running_ios) && 15270576b1c6SShaohua Li list_empty(&log->io_end_ios) && 1528a8c34f91SShaohua Li list_empty(&log->flushing_ios) && 152904732f74SChristoph Hellwig list_empty(&log->finished_ios))) 15300576b1c6SShaohua Li break; 15310576b1c6SShaohua Li 153217036461SChristoph Hellwig md_wakeup_thread(log->rdev->mddev->thread); 153317036461SChristoph Hellwig wait_event_lock_irq(log->iounit_wait, 153417036461SChristoph Hellwig r5l_reclaimable_space(log) > reclaimable, 153517036461SChristoph Hellwig log->io_list_lock); 15360576b1c6SShaohua Li } 153717036461SChristoph Hellwig 1538a39f7afdSSong Liu next_checkpoint = r5c_calculate_new_cp(conf); 15390576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock); 15400576b1c6SShaohua Li 1541a39f7afdSSong Liu if (reclaimable == 0 || !write_super) 15420576b1c6SShaohua Li return; 15430576b1c6SShaohua Li 15440576b1c6SShaohua Li /* 15450576b1c6SShaohua Li * write_super will flush cache of each raid disk. 
We must write super 15460576b1c6SShaohua Li * here, because the log area might be reused soon and we don't want to 15470576b1c6SShaohua Li * confuse recovery 15480576b1c6SShaohua Li */ 15494b482044SShaohua Li r5l_write_super_and_discard_space(log, next_checkpoint); 15500576b1c6SShaohua Li 15510576b1c6SShaohua Li mutex_lock(&log->io_mutex); 155217036461SChristoph Hellwig log->last_checkpoint = next_checkpoint; 1553a39f7afdSSong Liu r5c_update_log_state(log); 15540576b1c6SShaohua Li mutex_unlock(&log->io_mutex); 15550576b1c6SShaohua Li 155617036461SChristoph Hellwig r5l_run_no_space_stripes(log); 15570576b1c6SShaohua Li } 15580576b1c6SShaohua Li 15590576b1c6SShaohua Li static void r5l_reclaim_thread(struct md_thread *thread) 15600576b1c6SShaohua Li { 15610576b1c6SShaohua Li struct mddev *mddev = thread->mddev; 15620576b1c6SShaohua Li struct r5conf *conf = mddev->private; 15630576b1c6SShaohua Li struct r5l_log *log = conf->log; 15640576b1c6SShaohua Li 15650576b1c6SShaohua Li if (!log) 15660576b1c6SShaohua Li return; 1567a39f7afdSSong Liu r5c_do_reclaim(conf); 15680576b1c6SShaohua Li r5l_do_reclaim(log); 15690576b1c6SShaohua Li } 15700576b1c6SShaohua Li 1571a39f7afdSSong Liu void r5l_wake_reclaim(struct r5l_log *log, sector_t space) 1572f6bed0efSShaohua Li { 15730576b1c6SShaohua Li unsigned long target; 15740576b1c6SShaohua Li unsigned long new = (unsigned long)space; /* overflow in theory */ 15750576b1c6SShaohua Li 1576a39f7afdSSong Liu if (!log) 1577a39f7afdSSong Liu return; 15780576b1c6SShaohua Li do { 15790576b1c6SShaohua Li target = log->reclaim_target; 15800576b1c6SShaohua Li if (new < target) 15810576b1c6SShaohua Li return; 15820576b1c6SShaohua Li } while (cmpxchg(&log->reclaim_target, target, new) != target); 15830576b1c6SShaohua Li md_wakeup_thread(log->reclaim_thread); 1584f6bed0efSShaohua Li } 1585f6bed0efSShaohua Li 1586e6c033f7SShaohua Li void r5l_quiesce(struct r5l_log *log, int state) 1587e6c033f7SShaohua Li { 15884b482044SShaohua Li struct mddev *mddev; 1589e6c033f7SShaohua Li if (!log || state == 2) 1590e6c033f7SShaohua Li return; 1591ce1ccd07SShaohua Li if (state == 0) 1592ce1ccd07SShaohua Li kthread_unpark(log->reclaim_thread->tsk); 1593ce1ccd07SShaohua Li else if (state == 1) { 15944b482044SShaohua Li /* make sure r5l_write_super_and_discard_space exits */ 15954b482044SShaohua Li mddev = log->rdev->mddev; 15964b482044SShaohua Li wake_up(&mddev->sb_wait); 1597ce1ccd07SShaohua Li kthread_park(log->reclaim_thread->tsk); 1598a39f7afdSSong Liu r5l_wake_reclaim(log, MaxSector); 1599e6c033f7SShaohua Li r5l_do_reclaim(log); 1600e6c033f7SShaohua Li } 1601e6c033f7SShaohua Li } 1602e6c033f7SShaohua Li 16036e74a9cfSShaohua Li bool r5l_log_disk_error(struct r5conf *conf) 16046e74a9cfSShaohua Li { 1605f6b6ec5cSShaohua Li struct r5l_log *log; 1606f6b6ec5cSShaohua Li bool ret; 16077dde2ad3SShaohua Li /* don't allow write if journal disk is missing */ 1608f6b6ec5cSShaohua Li rcu_read_lock(); 1609f6b6ec5cSShaohua Li log = rcu_dereference(conf->log); 1610f6b6ec5cSShaohua Li 1611f6b6ec5cSShaohua Li if (!log) 1612f6b6ec5cSShaohua Li ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 1613f6b6ec5cSShaohua Li else 1614f6b6ec5cSShaohua Li ret = test_bit(Faulty, &log->rdev->flags); 1615f6b6ec5cSShaohua Li rcu_read_unlock(); 1616f6b6ec5cSShaohua Li return ret; 16176e74a9cfSShaohua Li } 16186e74a9cfSShaohua Li 1619effe6ee7SSong Liu #define R5L_RECOVERY_PAGE_POOL_SIZE 256 1620effe6ee7SSong Liu 1621355810d1SShaohua Li struct r5l_recovery_ctx { 1622355810d1SShaohua Li struct page *meta_page; /* current 
meta */ 1623355810d1SShaohua Li sector_t meta_total_blocks; /* total size of current meta and data */ 1624355810d1SShaohua Li sector_t pos; /* recovery position */ 1625355810d1SShaohua Li u64 seq; /* recovery position seq */ 1626b4c625c6SSong Liu int data_parity_stripes; /* number of data_parity stripes */ 1627b4c625c6SSong Liu int data_only_stripes; /* number of data_only stripes */ 1628b4c625c6SSong Liu struct list_head cached_list; 1629effe6ee7SSong Liu 1630effe6ee7SSong Liu /* 1631effe6ee7SSong Liu * read ahead page pool (ra_pool) 1632effe6ee7SSong Liu * in recovery, log is read sequentially. It is not efficient to 1633effe6ee7SSong Liu * read every page with sync_page_io(). The read ahead page pool 1634effe6ee7SSong Liu * reads multiple pages with one IO, so further log read can 1635effe6ee7SSong Liu * just copy data from the pool. 1636effe6ee7SSong Liu */ 1637effe6ee7SSong Liu struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE]; 1638effe6ee7SSong Liu sector_t pool_offset; /* offset of first page in the pool */ 1639effe6ee7SSong Liu int total_pages; /* total allocated pages */ 1640effe6ee7SSong Liu int valid_pages; /* pages with valid data */ 1641effe6ee7SSong Liu struct bio *ra_bio; /* bio to do the read ahead */ 1642355810d1SShaohua Li }; 1643355810d1SShaohua Li 1644effe6ee7SSong Liu static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, 1645effe6ee7SSong Liu struct r5l_recovery_ctx *ctx) 1646effe6ee7SSong Liu { 1647effe6ee7SSong Liu struct page *page; 1648effe6ee7SSong Liu 1649effe6ee7SSong Liu ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs); 1650effe6ee7SSong Liu if (!ctx->ra_bio) 1651effe6ee7SSong Liu return -ENOMEM; 1652effe6ee7SSong Liu 1653effe6ee7SSong Liu ctx->valid_pages = 0; 1654effe6ee7SSong Liu ctx->total_pages = 0; 1655effe6ee7SSong Liu while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) { 1656effe6ee7SSong Liu page = alloc_page(GFP_KERNEL); 1657effe6ee7SSong Liu 1658effe6ee7SSong Liu if (!page) 1659effe6ee7SSong Liu break; 1660effe6ee7SSong Liu ctx->ra_pool[ctx->total_pages] = page; 1661effe6ee7SSong Liu ctx->total_pages += 1; 1662effe6ee7SSong Liu } 1663effe6ee7SSong Liu 1664effe6ee7SSong Liu if (ctx->total_pages == 0) { 1665effe6ee7SSong Liu bio_put(ctx->ra_bio); 1666effe6ee7SSong Liu return -ENOMEM; 1667effe6ee7SSong Liu } 1668effe6ee7SSong Liu 1669effe6ee7SSong Liu ctx->pool_offset = 0; 1670effe6ee7SSong Liu return 0; 1671effe6ee7SSong Liu } 1672effe6ee7SSong Liu 1673effe6ee7SSong Liu static void r5l_recovery_free_ra_pool(struct r5l_log *log, 1674effe6ee7SSong Liu struct r5l_recovery_ctx *ctx) 1675effe6ee7SSong Liu { 1676effe6ee7SSong Liu int i; 1677effe6ee7SSong Liu 1678effe6ee7SSong Liu for (i = 0; i < ctx->total_pages; ++i) 1679effe6ee7SSong Liu put_page(ctx->ra_pool[i]); 1680effe6ee7SSong Liu bio_put(ctx->ra_bio); 1681effe6ee7SSong Liu } 1682effe6ee7SSong Liu 1683effe6ee7SSong Liu /* 1684effe6ee7SSong Liu * fetch ctx->valid_pages pages from offset 1685effe6ee7SSong Liu * In normal cases, ctx->valid_pages == ctx->total_pages after the call. 
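 * (Worked example, illustrative: with a full 256-page pool the window
 * spans 256 * BLOCK_SECTORS = 2048 sectors, i.e. 1 MiB of log starting
 * at pool_offset, and a later read of sector "offset" is served from the
 * pool iff pool_offset <= offset < pool_offset + valid_pages * BLOCK_SECTORS.)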
1686effe6ee7SSong Liu * However, if the offset is close to the end of the journal device, 1687effe6ee7SSong Liu * ctx->valid_pages could be smaller than ctx->total_pages 1688effe6ee7SSong Liu */ 1689effe6ee7SSong Liu static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, 1690effe6ee7SSong Liu struct r5l_recovery_ctx *ctx, 1691effe6ee7SSong Liu sector_t offset) 1692effe6ee7SSong Liu { 1693effe6ee7SSong Liu bio_reset(ctx->ra_bio); 169474d46992SChristoph Hellwig bio_set_dev(ctx->ra_bio, log->rdev->bdev); 1695effe6ee7SSong Liu bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); 1696effe6ee7SSong Liu ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; 1697effe6ee7SSong Liu 1698effe6ee7SSong Liu ctx->valid_pages = 0; 1699effe6ee7SSong Liu ctx->pool_offset = offset; 1700effe6ee7SSong Liu 1701effe6ee7SSong Liu while (ctx->valid_pages < ctx->total_pages) { 1702effe6ee7SSong Liu bio_add_page(ctx->ra_bio, 1703effe6ee7SSong Liu ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0); 1704effe6ee7SSong Liu ctx->valid_pages += 1; 1705effe6ee7SSong Liu 1706effe6ee7SSong Liu offset = r5l_ring_add(log, offset, BLOCK_SECTORS); 1707effe6ee7SSong Liu 1708effe6ee7SSong Liu if (offset == 0) /* reached end of the device */ 1709effe6ee7SSong Liu break; 1710effe6ee7SSong Liu } 1711effe6ee7SSong Liu 1712effe6ee7SSong Liu return submit_bio_wait(ctx->ra_bio); 1713effe6ee7SSong Liu } 1714effe6ee7SSong Liu 1715effe6ee7SSong Liu /* 1716effe6ee7SSong Liu * try read a page from the read ahead page pool, if the page is not in the 1717effe6ee7SSong Liu * pool, call r5l_recovery_fetch_ra_pool 1718effe6ee7SSong Liu */ 1719effe6ee7SSong Liu static int r5l_recovery_read_page(struct r5l_log *log, 1720effe6ee7SSong Liu struct r5l_recovery_ctx *ctx, 1721effe6ee7SSong Liu struct page *page, 1722effe6ee7SSong Liu sector_t offset) 1723effe6ee7SSong Liu { 1724effe6ee7SSong Liu int ret; 1725effe6ee7SSong Liu 1726effe6ee7SSong Liu if (offset < ctx->pool_offset || 1727effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) { 1728effe6ee7SSong Liu ret = r5l_recovery_fetch_ra_pool(log, ctx, offset); 1729effe6ee7SSong Liu if (ret) 1730effe6ee7SSong Liu return ret; 1731effe6ee7SSong Liu } 1732effe6ee7SSong Liu 1733effe6ee7SSong Liu BUG_ON(offset < ctx->pool_offset || 1734effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS); 1735effe6ee7SSong Liu 1736effe6ee7SSong Liu memcpy(page_address(page), 1737effe6ee7SSong Liu page_address(ctx->ra_pool[(offset - ctx->pool_offset) >> 1738effe6ee7SSong Liu BLOCK_SECTOR_SHIFT]), 1739effe6ee7SSong Liu PAGE_SIZE); 1740effe6ee7SSong Liu return 0; 1741effe6ee7SSong Liu } 1742effe6ee7SSong Liu 17439ed988f5SSong Liu static int r5l_recovery_read_meta_block(struct r5l_log *log, 1744355810d1SShaohua Li struct r5l_recovery_ctx *ctx) 1745355810d1SShaohua Li { 1746355810d1SShaohua Li struct page *page = ctx->meta_page; 1747355810d1SShaohua Li struct r5l_meta_block *mb; 1748355810d1SShaohua Li u32 crc, stored_crc; 1749effe6ee7SSong Liu int ret; 1750355810d1SShaohua Li 1751effe6ee7SSong Liu ret = r5l_recovery_read_page(log, ctx, page, ctx->pos); 1752effe6ee7SSong Liu if (ret != 0) 1753effe6ee7SSong Liu return ret; 1754355810d1SShaohua Li 1755355810d1SShaohua Li mb = page_address(page); 1756355810d1SShaohua Li stored_crc = le32_to_cpu(mb->checksum); 1757355810d1SShaohua Li mb->checksum = 0; 1758355810d1SShaohua Li 1759355810d1SShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || 1760355810d1SShaohua Li le64_to_cpu(mb->seq) != ctx->seq || 1761355810d1SShaohua Li 
mb->version != R5LOG_VERSION || 1762355810d1SShaohua Li le64_to_cpu(mb->position) != ctx->pos) 1763355810d1SShaohua Li return -EINVAL; 1764355810d1SShaohua Li 17655cb2fbd6SShaohua Li crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); 1766355810d1SShaohua Li if (stored_crc != crc) 1767355810d1SShaohua Li return -EINVAL; 1768355810d1SShaohua Li 1769355810d1SShaohua Li if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) 1770355810d1SShaohua Li return -EINVAL; 1771355810d1SShaohua Li 1772355810d1SShaohua Li ctx->meta_total_blocks = BLOCK_SECTORS; 1773355810d1SShaohua Li 1774355810d1SShaohua Li return 0; 1775355810d1SShaohua Li } 1776355810d1SShaohua Li 17779ed988f5SSong Liu static void 17789ed988f5SSong Liu r5l_recovery_create_empty_meta_block(struct r5l_log *log, 17799ed988f5SSong Liu struct page *page, 17809ed988f5SSong Liu sector_t pos, u64 seq) 1781355810d1SShaohua Li { 1782355810d1SShaohua Li struct r5l_meta_block *mb; 1783355810d1SShaohua Li 1784355810d1SShaohua Li mb = page_address(page); 17859ed988f5SSong Liu clear_page(mb); 1786355810d1SShaohua Li mb->magic = cpu_to_le32(R5LOG_MAGIC); 1787355810d1SShaohua Li mb->version = R5LOG_VERSION; 1788355810d1SShaohua Li mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); 1789355810d1SShaohua Li mb->seq = cpu_to_le64(seq); 1790355810d1SShaohua Li mb->position = cpu_to_le64(pos); 1791355810d1SShaohua Li } 1792355810d1SShaohua Li 1793355810d1SShaohua Li static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, 1794355810d1SShaohua Li u64 seq) 1795355810d1SShaohua Li { 1796355810d1SShaohua Li struct page *page; 1797355810d1SShaohua Li struct r5l_meta_block *mb; 1798355810d1SShaohua Li 17999ed988f5SSong Liu page = alloc_page(GFP_KERNEL); 1800355810d1SShaohua Li if (!page) 1801355810d1SShaohua Li return -ENOMEM; 18029ed988f5SSong Liu r5l_recovery_create_empty_meta_block(log, page, pos, seq); 1803355810d1SShaohua Li mb = page_address(page); 18045c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 18055c88f403SSong Liu mb, PAGE_SIZE)); 1806796a5cf0SMike Christie if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 18075a8948f8SJan Kara REQ_SYNC | REQ_FUA, false)) { 1808355810d1SShaohua Li __free_page(page); 1809355810d1SShaohua Li return -EIO; 1810355810d1SShaohua Li } 1811355810d1SShaohua Li __free_page(page); 1812355810d1SShaohua Li return 0; 1813355810d1SShaohua Li } 1814355810d1SShaohua Li 1815b4c625c6SSong Liu /* 1816b4c625c6SSong Liu * r5l_recovery_load_data and r5l_recovery_load_parity uses flag R5_Wantwrite 1817b4c625c6SSong Liu * to mark valid (potentially not flushed) data in the journal. 1818b4c625c6SSong Liu * 1819b4c625c6SSong Liu * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb, 1820b4c625c6SSong Liu * so there should not be any mismatch here. 
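 *
 * Flag flow during recovery, as a reading aid (illustrative):
 *	r5l_recovery_load_data():	dev gets R5_Wantwrite and the stripe
 *					gets STRIPE_R5C_CACHING (data only
 *					so far)
 *	r5l_recovery_load_parity():	pd/qd get R5_Wantwrite and
 *					STRIPE_R5C_CACHING is cleared
 *	r5c_recovery_load_one_stripe():	R5_Wantwrite is converted to
 *					R5_InJournal + R5_UPTODATE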
1821b4c625c6SSong Liu */ 1822b4c625c6SSong Liu static void r5l_recovery_load_data(struct r5l_log *log, 1823b4c625c6SSong Liu struct stripe_head *sh, 1824b4c625c6SSong Liu struct r5l_recovery_ctx *ctx, 1825b4c625c6SSong Liu struct r5l_payload_data_parity *payload, 1826b4c625c6SSong Liu sector_t log_offset) 1827f6bed0efSShaohua Li { 1828b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 1829b4c625c6SSong Liu struct r5conf *conf = mddev->private; 1830b4c625c6SSong Liu int dd_idx; 1831355810d1SShaohua Li 1832b4c625c6SSong Liu raid5_compute_sector(conf, 1833b4c625c6SSong Liu le64_to_cpu(payload->location), 0, 1834b4c625c6SSong Liu &dd_idx, sh); 1835effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); 1836b4c625c6SSong Liu sh->dev[dd_idx].log_checksum = 1837b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]); 1838b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS; 1839b4c625c6SSong Liu 1840b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); 1841b4c625c6SSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state); 1842b4c625c6SSong Liu } 1843b4c625c6SSong Liu 1844b4c625c6SSong Liu static void r5l_recovery_load_parity(struct r5l_log *log, 1845b4c625c6SSong Liu struct stripe_head *sh, 1846b4c625c6SSong Liu struct r5l_recovery_ctx *ctx, 1847b4c625c6SSong Liu struct r5l_payload_data_parity *payload, 1848b4c625c6SSong Liu sector_t log_offset) 1849b4c625c6SSong Liu { 1850b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 1851b4c625c6SSong Liu struct r5conf *conf = mddev->private; 1852b4c625c6SSong Liu 1853b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; 1854effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); 1855b4c625c6SSong Liu sh->dev[sh->pd_idx].log_checksum = 1856b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]); 1857b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); 1858b4c625c6SSong Liu 1859b4c625c6SSong Liu if (sh->qd_idx >= 0) { 1860effe6ee7SSong Liu r5l_recovery_read_page( 1861effe6ee7SSong Liu log, ctx, sh->dev[sh->qd_idx].page, 1862effe6ee7SSong Liu r5l_ring_add(log, log_offset, BLOCK_SECTORS)); 1863b4c625c6SSong Liu sh->dev[sh->qd_idx].log_checksum = 1864b4c625c6SSong Liu le32_to_cpu(payload->checksum[1]); 1865b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); 1866b4c625c6SSong Liu } 1867b4c625c6SSong Liu clear_bit(STRIPE_R5C_CACHING, &sh->state); 1868b4c625c6SSong Liu } 1869b4c625c6SSong Liu 1870b4c625c6SSong Liu static void r5l_recovery_reset_stripe(struct stripe_head *sh) 1871b4c625c6SSong Liu { 1872b4c625c6SSong Liu int i; 1873b4c625c6SSong Liu 1874b4c625c6SSong Liu sh->state = 0; 1875b4c625c6SSong Liu sh->log_start = MaxSector; 1876b4c625c6SSong Liu for (i = sh->disks; i--; ) 1877b4c625c6SSong Liu sh->dev[i].flags = 0; 1878b4c625c6SSong Liu } 1879b4c625c6SSong Liu 1880b4c625c6SSong Liu static void 1881b4c625c6SSong Liu r5l_recovery_replay_one_stripe(struct r5conf *conf, 1882b4c625c6SSong Liu struct stripe_head *sh, 1883b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 1884b4c625c6SSong Liu { 1885b4c625c6SSong Liu struct md_rdev *rdev, *rrdev; 1886b4c625c6SSong Liu int disk_index; 1887b4c625c6SSong Liu int data_count = 0; 1888b4c625c6SSong Liu 1889b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) { 1890b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) 1891b4c625c6SSong Liu continue; 1892b4c625c6SSong Liu if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) 
1893b4c625c6SSong Liu continue; 1894b4c625c6SSong Liu data_count++; 1895b4c625c6SSong Liu } 1896b4c625c6SSong Liu 1897b4c625c6SSong Liu /* 1898b4c625c6SSong Liu * stripes that only have parity must have been flushed 1899b4c625c6SSong Liu * before the crash that we are now recovering from, so 1900b4c625c6SSong Liu * there is nothing more to recovery. 1901b4c625c6SSong Liu */ 1902b4c625c6SSong Liu if (data_count == 0) 1903b4c625c6SSong Liu goto out; 1904b4c625c6SSong Liu 1905b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) { 1906b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) 1907b4c625c6SSong Liu continue; 1908b4c625c6SSong Liu 1909b4c625c6SSong Liu /* in case device is broken */ 1910b4c625c6SSong Liu rcu_read_lock(); 1911b4c625c6SSong Liu rdev = rcu_dereference(conf->disks[disk_index].rdev); 1912b4c625c6SSong Liu if (rdev) { 1913b4c625c6SSong Liu atomic_inc(&rdev->nr_pending); 1914b4c625c6SSong Liu rcu_read_unlock(); 1915b4c625c6SSong Liu sync_page_io(rdev, sh->sector, PAGE_SIZE, 1916b4c625c6SSong Liu sh->dev[disk_index].page, REQ_OP_WRITE, 0, 1917b4c625c6SSong Liu false); 1918b4c625c6SSong Liu rdev_dec_pending(rdev, rdev->mddev); 1919b4c625c6SSong Liu rcu_read_lock(); 1920b4c625c6SSong Liu } 1921b4c625c6SSong Liu rrdev = rcu_dereference(conf->disks[disk_index].replacement); 1922b4c625c6SSong Liu if (rrdev) { 1923b4c625c6SSong Liu atomic_inc(&rrdev->nr_pending); 1924b4c625c6SSong Liu rcu_read_unlock(); 1925b4c625c6SSong Liu sync_page_io(rrdev, sh->sector, PAGE_SIZE, 1926b4c625c6SSong Liu sh->dev[disk_index].page, REQ_OP_WRITE, 0, 1927b4c625c6SSong Liu false); 1928b4c625c6SSong Liu rdev_dec_pending(rrdev, rrdev->mddev); 1929b4c625c6SSong Liu rcu_read_lock(); 1930b4c625c6SSong Liu } 1931b4c625c6SSong Liu rcu_read_unlock(); 1932b4c625c6SSong Liu } 1933b4c625c6SSong Liu ctx->data_parity_stripes++; 1934b4c625c6SSong Liu out: 1935b4c625c6SSong Liu r5l_recovery_reset_stripe(sh); 1936b4c625c6SSong Liu } 1937b4c625c6SSong Liu 1938b4c625c6SSong Liu static struct stripe_head * 1939b4c625c6SSong Liu r5c_recovery_alloc_stripe(struct r5conf *conf, 19403c66abbaSSong Liu sector_t stripe_sect) 1941b4c625c6SSong Liu { 1942b4c625c6SSong Liu struct stripe_head *sh; 1943b4c625c6SSong Liu 1944b4c625c6SSong Liu sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); 1945b4c625c6SSong Liu if (!sh) 1946b4c625c6SSong Liu return NULL; /* no more stripe available */ 1947b4c625c6SSong Liu 1948b4c625c6SSong Liu r5l_recovery_reset_stripe(sh); 1949b4c625c6SSong Liu 1950b4c625c6SSong Liu return sh; 1951b4c625c6SSong Liu } 1952b4c625c6SSong Liu 1953b4c625c6SSong Liu static struct stripe_head * 1954b4c625c6SSong Liu r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect) 1955b4c625c6SSong Liu { 1956b4c625c6SSong Liu struct stripe_head *sh; 1957b4c625c6SSong Liu 1958b4c625c6SSong Liu list_for_each_entry(sh, list, lru) 1959b4c625c6SSong Liu if (sh->sector == sect) 1960b4c625c6SSong Liu return sh; 1961b4c625c6SSong Liu return NULL; 1962b4c625c6SSong Liu } 1963b4c625c6SSong Liu 1964b4c625c6SSong Liu static void 1965b4c625c6SSong Liu r5c_recovery_drop_stripes(struct list_head *cached_stripe_list, 1966b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 1967b4c625c6SSong Liu { 1968b4c625c6SSong Liu struct stripe_head *sh, *next; 1969b4c625c6SSong Liu 1970b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { 1971b4c625c6SSong Liu r5l_recovery_reset_stripe(sh); 1972b4c625c6SSong Liu list_del_init(&sh->lru); 1973b4c625c6SSong Liu 
raid5_release_stripe(sh); 1974b4c625c6SSong Liu } 1975b4c625c6SSong Liu } 1976b4c625c6SSong Liu 1977b4c625c6SSong Liu static void 1978b4c625c6SSong Liu r5c_recovery_replay_stripes(struct list_head *cached_stripe_list, 1979b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 1980b4c625c6SSong Liu { 1981b4c625c6SSong Liu struct stripe_head *sh, *next; 1982b4c625c6SSong Liu 1983b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru) 1984b4c625c6SSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { 1985b4c625c6SSong Liu r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx); 1986b4c625c6SSong Liu list_del_init(&sh->lru); 1987b4c625c6SSong Liu raid5_release_stripe(sh); 1988b4c625c6SSong Liu } 1989b4c625c6SSong Liu } 1990b4c625c6SSong Liu 1991b4c625c6SSong Liu /* if matches return 0; otherwise return -EINVAL */ 1992b4c625c6SSong Liu static int 1993effe6ee7SSong Liu r5l_recovery_verify_data_checksum(struct r5l_log *log, 1994effe6ee7SSong Liu struct r5l_recovery_ctx *ctx, 1995effe6ee7SSong Liu struct page *page, 1996b4c625c6SSong Liu sector_t log_offset, __le32 log_checksum) 1997b4c625c6SSong Liu { 1998b4c625c6SSong Liu void *addr; 1999b4c625c6SSong Liu u32 checksum; 2000b4c625c6SSong Liu 2001effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, page, log_offset); 2002b4c625c6SSong Liu addr = kmap_atomic(page); 2003b4c625c6SSong Liu checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); 2004b4c625c6SSong Liu kunmap_atomic(addr); 2005b4c625c6SSong Liu return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL; 2006b4c625c6SSong Liu } 2007b4c625c6SSong Liu 2008b4c625c6SSong Liu /* 2009b4c625c6SSong Liu * before loading data to stripe cache, we need verify checksum for all data, 2010b4c625c6SSong Liu * if there is mismatch for any data page, we drop all data in the mata block 2011b4c625c6SSong Liu */ 2012b4c625c6SSong Liu static int 2013b4c625c6SSong Liu r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log, 2014b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 2015b4c625c6SSong Liu { 2016b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 2017b4c625c6SSong Liu struct r5conf *conf = mddev->private; 2018b4c625c6SSong Liu struct r5l_meta_block *mb = page_address(ctx->meta_page); 2019b4c625c6SSong Liu sector_t mb_offset = sizeof(struct r5l_meta_block); 2020b4c625c6SSong Liu sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 2021b4c625c6SSong Liu struct page *page; 2022b4c625c6SSong Liu struct r5l_payload_data_parity *payload; 20232d4f4687SSong Liu struct r5l_payload_flush *payload_flush; 2024b4c625c6SSong Liu 2025b4c625c6SSong Liu page = alloc_page(GFP_KERNEL); 2026b4c625c6SSong Liu if (!page) 2027355810d1SShaohua Li return -ENOMEM; 2028355810d1SShaohua Li 2029b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) { 2030b4c625c6SSong Liu payload = (void *)mb + mb_offset; 20312d4f4687SSong Liu payload_flush = (void *)mb + mb_offset; 2032b4c625c6SSong Liu 20331ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { 2034b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum( 2035effe6ee7SSong Liu log, ctx, page, log_offset, 2036b4c625c6SSong Liu payload->checksum[0]) < 0) 2037b4c625c6SSong Liu goto mismatch; 20381ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) { 2039b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum( 2040effe6ee7SSong Liu log, ctx, page, log_offset, 2041b4c625c6SSong Liu payload->checksum[0]) < 0) 2042b4c625c6SSong Liu goto mismatch; 2043b4c625c6SSong Liu if 
(conf->max_degraded == 2 && /* q for RAID 6 */
2044b4c625c6SSong Liu 			    r5l_recovery_verify_data_checksum(
2045effe6ee7SSong Liu 				    log, ctx, page,
2046b4c625c6SSong Liu 				    r5l_ring_add(log, log_offset,
2047b4c625c6SSong Liu 						 BLOCK_SECTORS),
2048b4c625c6SSong Liu 				    payload->checksum[1]) < 0)
2049b4c625c6SSong Liu 				goto mismatch;
20501ad45a9bSJason Yan 		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20512d4f4687SSong Liu 			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
20522d4f4687SSong Liu 		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2053b4c625c6SSong Liu 			goto mismatch;
2054b4c625c6SSong Liu 
20551ad45a9bSJason Yan 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20562d4f4687SSong Liu 			mb_offset += sizeof(struct r5l_payload_flush) +
20572d4f4687SSong Liu 				le32_to_cpu(payload_flush->size);
20582d4f4687SSong Liu 		} else {
20592d4f4687SSong Liu 			/* DATA or PARITY payload */
2060b4c625c6SSong Liu 			log_offset = r5l_ring_add(log, log_offset,
2061b4c625c6SSong Liu 						  le32_to_cpu(payload->size));
2062b4c625c6SSong Liu 			mb_offset += sizeof(struct r5l_payload_data_parity) +
2063b4c625c6SSong Liu 				sizeof(__le32) *
2064b4c625c6SSong Liu 				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2065b4c625c6SSong Liu 		}
2066b4c625c6SSong Liu 
20672d4f4687SSong Liu 	}
20682d4f4687SSong Liu 
2069b4c625c6SSong Liu 	put_page(page);
2070b4c625c6SSong Liu 	return 0;
2071b4c625c6SSong Liu 
2072b4c625c6SSong Liu mismatch:
2073b4c625c6SSong Liu 	put_page(page);
2074b4c625c6SSong Liu 	return -EINVAL;
2075b4c625c6SSong Liu }
2076b4c625c6SSong Liu 
2077b4c625c6SSong Liu /*
2078b4c625c6SSong Liu  * Analyze all data/parity pages in one meta block
2079b4c625c6SSong Liu  * Returns:
2080b4c625c6SSong Liu  * 0 for success
2081b4c625c6SSong Liu  * -EINVAL for unknown payload type
2082b4c625c6SSong Liu  * -EAGAIN for checksum mismatch of data page
2083b4c625c6SSong Liu  * -ENOMEM for running out of memory (alloc_page failed or no more stripes)
2084b4c625c6SSong Liu  */
2085b4c625c6SSong Liu static int
2086b4c625c6SSong Liu r5c_recovery_analyze_meta_block(struct r5l_log *log,
2087b4c625c6SSong Liu 				struct r5l_recovery_ctx *ctx,
2088b4c625c6SSong Liu 				struct list_head *cached_stripe_list)
2089b4c625c6SSong Liu {
2090b4c625c6SSong Liu 	struct mddev *mddev = log->rdev->mddev;
2091b4c625c6SSong Liu 	struct r5conf *conf = mddev->private;
2092b4c625c6SSong Liu 	struct r5l_meta_block *mb;
2093b4c625c6SSong Liu 	struct r5l_payload_data_parity *payload;
20942d4f4687SSong Liu 	struct r5l_payload_flush *payload_flush;
2095b4c625c6SSong Liu 	int mb_offset;
2096b4c625c6SSong Liu 	sector_t log_offset;
2097b4c625c6SSong Liu 	sector_t stripe_sect;
2098b4c625c6SSong Liu 	struct stripe_head *sh;
2099b4c625c6SSong Liu 	int ret;
2100b4c625c6SSong Liu 
2101b4c625c6SSong Liu 	/*
2102b4c625c6SSong Liu 	 * on a mismatch in data blocks, we drop all data in this mb, but
2103b4c625c6SSong Liu 	 * we still read the next mb for other data with the FLUSH flag, as
2104b4c625c6SSong Liu 	 * io_units can finish out of order.
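	 *
	 * Payload bookkeeping example (illustrative): a DATA payload covering
	 * one 4KiB page has size == 8 sectors, so mb_offset advances by
	 * sizeof(struct r5l_payload_data_parity) +
	 * sizeof(__le32) * (8 >> (PAGE_SHIFT - 9)), i.e. the header plus
	 * exactly one checksum word, while log_offset advances by the 8 data
	 * sectors themselves.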
2105b4c625c6SSong Liu 	 */
2106b4c625c6SSong Liu 	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2107b4c625c6SSong Liu 	if (ret == -EINVAL)
2108b4c625c6SSong Liu 		return -EAGAIN;
2109b4c625c6SSong Liu 	else if (ret)
2110b4c625c6SSong Liu 		return ret; /* -ENOMEM due to alloc_page() failure */
2111b4c625c6SSong Liu 
2112b4c625c6SSong Liu 	mb = page_address(ctx->meta_page);
2113b4c625c6SSong Liu 	mb_offset = sizeof(struct r5l_meta_block);
2114b4c625c6SSong Liu 	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2115b4c625c6SSong Liu 
2116b4c625c6SSong Liu 	while (mb_offset < le32_to_cpu(mb->meta_size)) {
2117b4c625c6SSong Liu 		int dd;
2118b4c625c6SSong Liu 
2119b4c625c6SSong Liu 		payload = (void *)mb + mb_offset;
21202d4f4687SSong Liu 		payload_flush = (void *)mb + mb_offset;
21212d4f4687SSong Liu 
21221ad45a9bSJason Yan 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
21232d4f4687SSong Liu 			int i, count;
21242d4f4687SSong Liu 
21252d4f4687SSong Liu 			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
21262d4f4687SSong Liu 			for (i = 0; i < count; ++i) {
21272d4f4687SSong Liu 				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
21282d4f4687SSong Liu 				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
21292d4f4687SSong Liu 								stripe_sect);
21302d4f4687SSong Liu 				if (sh) {
21312d4f4687SSong Liu 					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
21322d4f4687SSong Liu 					r5l_recovery_reset_stripe(sh);
21332d4f4687SSong Liu 					list_del_init(&sh->lru);
21342d4f4687SSong Liu 					raid5_release_stripe(sh);
21352d4f4687SSong Liu 				}
21362d4f4687SSong Liu 			}
21372d4f4687SSong Liu 
21382d4f4687SSong Liu 			mb_offset += sizeof(struct r5l_payload_flush) +
21392d4f4687SSong Liu 				le32_to_cpu(payload_flush->size);
21402d4f4687SSong Liu 			continue;
21412d4f4687SSong Liu 		}
21422d4f4687SSong Liu 
21432d4f4687SSong Liu 		/* DATA or PARITY payload */
21441ad45a9bSJason Yan 		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2145b4c625c6SSong Liu 			raid5_compute_sector(
2146b4c625c6SSong Liu 				conf, le64_to_cpu(payload->location), 0, &dd,
2147b4c625c6SSong Liu 				NULL)
2148b4c625c6SSong Liu 			: le64_to_cpu(payload->location);
2149b4c625c6SSong Liu 
2150b4c625c6SSong Liu 		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2151b4c625c6SSong Liu 						stripe_sect);
2152b4c625c6SSong Liu 
2153b4c625c6SSong Liu 		if (!sh) {
21543c66abbaSSong Liu 			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
2155b4c625c6SSong Liu 			/*
2156b4c625c6SSong Liu 			 * cannot get stripe from raid5_get_active_stripe
2157b4c625c6SSong Liu 			 * try replay some stripes
2158b4c625c6SSong Liu 			 */
2159b4c625c6SSong Liu 			if (!sh) {
2160b4c625c6SSong Liu 				r5c_recovery_replay_stripes(
2161b4c625c6SSong Liu 					cached_stripe_list, ctx);
2162b4c625c6SSong Liu 				sh = r5c_recovery_alloc_stripe(
21633c66abbaSSong Liu 					conf, stripe_sect);
2164b4c625c6SSong Liu 			}
2165b4c625c6SSong Liu 			if (!sh) {
2166b4c625c6SSong Liu 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2167b4c625c6SSong Liu 					 mdname(mddev),
2168b4c625c6SSong Liu 					 conf->min_nr_stripes * 2);
2169b4c625c6SSong Liu 				raid5_set_cache_size(mddev,
2170b4c625c6SSong Liu 						     conf->min_nr_stripes * 2);
21713c66abbaSSong Liu 				sh = r5c_recovery_alloc_stripe(conf,
21723c66abbaSSong Liu 							       stripe_sect);
2173b4c625c6SSong Liu 			}
2174b4c625c6SSong Liu 			if (!sh) {
2175b4c625c6SSong Liu 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure.
Recovery failed.\n", 2176b4c625c6SSong Liu mdname(mddev)); 2177b4c625c6SSong Liu return -ENOMEM; 2178b4c625c6SSong Liu } 2179b4c625c6SSong Liu list_add_tail(&sh->lru, cached_stripe_list); 2180b4c625c6SSong Liu } 2181b4c625c6SSong Liu 21821ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { 2183f7b7bee7SZhengyuan Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 2184f7b7bee7SZhengyuan Liu test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { 2185b4c625c6SSong Liu r5l_recovery_replay_one_stripe(conf, sh, ctx); 2186b4c625c6SSong Liu list_move_tail(&sh->lru, cached_stripe_list); 2187b4c625c6SSong Liu } 2188b4c625c6SSong Liu r5l_recovery_load_data(log, sh, ctx, payload, 2189b4c625c6SSong Liu log_offset); 21901ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) 2191b4c625c6SSong Liu r5l_recovery_load_parity(log, sh, ctx, payload, 2192b4c625c6SSong Liu log_offset); 2193b4c625c6SSong Liu else 2194b4c625c6SSong Liu return -EINVAL; 2195b4c625c6SSong Liu 2196b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset, 2197b4c625c6SSong Liu le32_to_cpu(payload->size)); 2198b4c625c6SSong Liu 2199b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) + 2200b4c625c6SSong Liu sizeof(__le32) * 2201b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); 2202b4c625c6SSong Liu } 2203b4c625c6SSong Liu 2204b4c625c6SSong Liu return 0; 2205b4c625c6SSong Liu } 2206b4c625c6SSong Liu 2207b4c625c6SSong Liu /* 2208b4c625c6SSong Liu * Load the stripe into cache. The stripe will be written out later by 2209b4c625c6SSong Liu * the stripe cache state machine. 2210b4c625c6SSong Liu */ 2211b4c625c6SSong Liu static void r5c_recovery_load_one_stripe(struct r5l_log *log, 2212b4c625c6SSong Liu struct stripe_head *sh) 2213b4c625c6SSong Liu { 2214b4c625c6SSong Liu struct r5dev *dev; 2215b4c625c6SSong Liu int i; 2216b4c625c6SSong Liu 2217b4c625c6SSong Liu for (i = sh->disks; i--; ) { 2218b4c625c6SSong Liu dev = sh->dev + i; 2219b4c625c6SSong Liu if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) { 2220b4c625c6SSong Liu set_bit(R5_InJournal, &dev->flags); 2221b4c625c6SSong Liu set_bit(R5_UPTODATE, &dev->flags); 2222b4c625c6SSong Liu } 2223b4c625c6SSong Liu } 2224b4c625c6SSong Liu } 2225b4c625c6SSong Liu 2226b4c625c6SSong Liu /* 2227b4c625c6SSong Liu * Scan through the log for all to-be-flushed data 2228b4c625c6SSong Liu * 2229b4c625c6SSong Liu * For stripes with data and parity, namely Data-Parity stripe 2230b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 0), we simply replay all the writes. 2231b4c625c6SSong Liu * 2232b4c625c6SSong Liu * For stripes with only data, namely Data-Only stripe 2233b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine. 2234b4c625c6SSong Liu * 2235b4c625c6SSong Liu * For a stripe, if we see data after parity, we should discard all previous 2236b4c625c6SSong Liu * data and parity for this stripe, as these data are already flushed to 2237b4c625c6SSong Liu * the array. 2238b4c625c6SSong Liu * 2239b4c625c6SSong Liu * At the end of the scan, we return the new journal_tail, which points to 2240b4c625c6SSong Liu * first data-only stripe on the journal device, or next invalid meta block. 
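 *
 * For example (illustrative): if the log holds stripe A (data + parity)
 * and stripe B (data only), the scan replays A straight to the raid disks
 * and leaves B on cached_list; the new tail then points at B's first meta
 * block, so B stays recoverable until it is rewritten and flushed.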
2241b4c625c6SSong Liu  */
2242b4c625c6SSong Liu static int r5c_recovery_flush_log(struct r5l_log *log,
2243b4c625c6SSong Liu 				  struct r5l_recovery_ctx *ctx)
2244b4c625c6SSong Liu {
2245bc8f167fSJackieLiu 	struct stripe_head *sh;
2246b4c625c6SSong Liu 	int ret = 0;
2247b4c625c6SSong Liu 
2248b4c625c6SSong Liu 	/* scan through the log */
2249b4c625c6SSong Liu 	while (1) {
2250b4c625c6SSong Liu 		if (r5l_recovery_read_meta_block(log, ctx))
2251b4c625c6SSong Liu 			break;
2252b4c625c6SSong Liu 
2253b4c625c6SSong Liu 		ret = r5c_recovery_analyze_meta_block(log, ctx,
2254b4c625c6SSong Liu 						      &ctx->cached_list);
2255b4c625c6SSong Liu 		/*
2256b4c625c6SSong Liu 		 * -EAGAIN means a mismatch in a data block; in this case, we
2257b4c625c6SSong Liu 		 * still try to scan the next meta block
2258b4c625c6SSong Liu 		 */
2259b4c625c6SSong Liu 		if (ret && ret != -EAGAIN)
2260b4c625c6SSong Liu 			break; /* ret == -EINVAL or -ENOMEM */
2261b4c625c6SSong Liu 		ctx->seq++;
2262b4c625c6SSong Liu 		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2263b4c625c6SSong Liu 	}
2264b4c625c6SSong Liu 
2265b4c625c6SSong Liu 	if (ret == -ENOMEM) {
2266b4c625c6SSong Liu 		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2267b4c625c6SSong Liu 		return ret;
2268b4c625c6SSong Liu 	}
2269b4c625c6SSong Liu 
2270b4c625c6SSong Liu 	/* replay data-parity stripes */
2271b4c625c6SSong Liu 	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2272b4c625c6SSong Liu 
2273b4c625c6SSong Liu 	/* load data-only stripes to stripe cache */
2274bc8f167fSJackieLiu 	list_for_each_entry(sh, &ctx->cached_list, lru) {
2275b4c625c6SSong Liu 		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2276b4c625c6SSong Liu 		r5c_recovery_load_one_stripe(log, sh);
2277b4c625c6SSong Liu 		ctx->data_only_stripes++;
2278b4c625c6SSong Liu 	}
2279b4c625c6SSong Liu 
2280b4c625c6SSong Liu 	return 0;
2281b4c625c6SSong Liu }
2282355810d1SShaohua Li 
2283355810d1SShaohua Li /*
2284355810d1SShaohua Li  * we did a recovery. Now ctx.pos points to an invalid meta block. The new
2285355810d1SShaohua Li  * log will start here. But we can't let the superblock point to the last
2286355810d1SShaohua Li  * valid meta block. The log might look like:
2287355810d1SShaohua Li  * | meta 1| meta 2| meta 3|
2288355810d1SShaohua Li  * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
2289355810d1SShaohua Li  * superblock points to meta 1, we write a new valid meta 2n. If a crash
2290355810d1SShaohua Li  * happens again, the new recovery will start from meta 1. Since meta 2n
2291355810d1SShaohua Li  * is valid now, recovery will think meta 3 is valid, which is wrong.
2292355810d1SShaohua Li  * The solution is to create a new meta in meta 2 with its seq == meta
22933c6edc66SSong Liu  * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
22943c6edc66SSong Liu  * will not think meta 3 is a valid meta, because its seq doesn't match.
2295355810d1SShaohua Li  */
2296355810d1SShaohua Li 
2297b4c625c6SSong Liu /*
2298b4c625c6SSong Liu  * Before recovery, the log looks like the following
2299b4c625c6SSong Liu  *
2300b4c625c6SSong Liu  *   ---------------------------------------------
2301b4c625c6SSong Liu  *   |           valid log        | invalid log  |
2302b4c625c6SSong Liu  *   ---------------------------------------------
2303b4c625c6SSong Liu  *   ^
2304b4c625c6SSong Liu  *   |- log->last_checkpoint
2305b4c625c6SSong Liu  *   |- log->last_cp_seq
2306b4c625c6SSong Liu  *
2307b4c625c6SSong Liu  * Now we scan through the log until we see an invalid entry
2308b4c625c6SSong Liu  *
2309b4c625c6SSong Liu  *   ---------------------------------------------
2310b4c625c6SSong Liu  *   |           valid log        | invalid log  |
2311b4c625c6SSong Liu  *   ---------------------------------------------
2312b4c625c6SSong Liu  *   ^                            ^
2313b4c625c6SSong Liu  *   |- log->last_checkpoint      |- ctx->pos
2314b4c625c6SSong Liu  *   |- log->last_cp_seq          |- ctx->seq
2315b4c625c6SSong Liu  *
2316b4c625c6SSong Liu  * From this point, we need to increase the seq number by 10000 to avoid
2317b4c625c6SSong Liu  * confusing the next recovery.
2318b4c625c6SSong Liu  *
2319b4c625c6SSong Liu  *   ---------------------------------------------
2320b4c625c6SSong Liu  *   |           valid log        | invalid log  |
2321b4c625c6SSong Liu  *   ---------------------------------------------
2322b4c625c6SSong Liu  *   ^                            ^
2323b4c625c6SSong Liu  *   |- log->last_checkpoint      |- ctx->pos+1
23243c6edc66SSong Liu  *   |- log->last_cp_seq          |- ctx->seq+10001
2325b4c625c6SSong Liu  *
2326b4c625c6SSong Liu  * However, it is not safe to start the state machine yet, because data only
2327b4c625c6SSong Liu  * parities are not yet secured in RAID. To save these data only parities, we
2328b4c625c6SSong Liu  * rewrite them from seq+10001.
2329b4c625c6SSong Liu  *
2330b4c625c6SSong Liu  * -----------------------------------------------------------------
2331b4c625c6SSong Liu  * |           valid log        | data only stripes | invalid log  |
2332b4c625c6SSong Liu  * -----------------------------------------------------------------
2333b4c625c6SSong Liu  * ^                                                ^
2334b4c625c6SSong Liu  * |- log->last_checkpoint                          |- ctx->pos+n
23353c6edc66SSong Liu  * |- log->last_cp_seq                              |- ctx->seq+10000+n
2336b4c625c6SSong Liu  *
2337b4c625c6SSong Liu  * If failure happens again during this process, the recovery can safely
2338b4c625c6SSong Liu  * start again from log->last_checkpoint.
2339b4c625c6SSong Liu  *
2340b4c625c6SSong Liu  * Once data only stripes are rewritten to the journal, we move log_tail
2341b4c625c6SSong Liu  *
2342b4c625c6SSong Liu  * -----------------------------------------------------------------
2343b4c625c6SSong Liu  * |             old log        | data only stripes | invalid log  |
2344b4c625c6SSong Liu  * -----------------------------------------------------------------
2345b4c625c6SSong Liu  *                              ^                   ^
2346b4c625c6SSong Liu  *                              |- log->last_checkpoint
23473c6edc66SSong Liu  *                              |- log->last_cp_seq |- ctx->pos+n, ctx->seq+10000+n
2348b4c625c6SSong Liu  *
2349b4c625c6SSong Liu  * Then we can safely start the state machine. If failure happens from this
2350b4c625c6SSong Liu  * point on, the recovery will start from the new log->last_checkpoint.
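 *
 * Concrete seq example (illustrative): if the scan ends with ctx->seq ==
 * 27, the rewrite starts at seq 10027; a stale meta left on disk with seq
 * 28 can then never be mistaken for a continuation of the new log.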
2351b4c625c6SSong Liu */ 2352b4c625c6SSong Liu static int 2353b4c625c6SSong Liu r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, 2354b4c625c6SSong Liu struct r5l_recovery_ctx *ctx) 2355b4c625c6SSong Liu { 2356a85dd7b8SSong Liu struct stripe_head *sh; 2357b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev; 2358b4c625c6SSong Liu struct page *page; 23593c66abbaSSong Liu sector_t next_checkpoint = MaxSector; 2360b4c625c6SSong Liu 2361b4c625c6SSong Liu page = alloc_page(GFP_KERNEL); 2362b4c625c6SSong Liu if (!page) { 2363b4c625c6SSong Liu pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n", 2364b4c625c6SSong Liu mdname(mddev)); 2365b4c625c6SSong Liu return -ENOMEM; 2366b4c625c6SSong Liu } 2367b4c625c6SSong Liu 23683c66abbaSSong Liu WARN_ON(list_empty(&ctx->cached_list)); 23693c66abbaSSong Liu 2370a85dd7b8SSong Liu list_for_each_entry(sh, &ctx->cached_list, lru) { 2371b4c625c6SSong Liu struct r5l_meta_block *mb; 2372b4c625c6SSong Liu int i; 2373b4c625c6SSong Liu int offset; 2374b4c625c6SSong Liu sector_t write_pos; 2375b4c625c6SSong Liu 2376b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); 2377b4c625c6SSong Liu r5l_recovery_create_empty_meta_block(log, page, 2378b4c625c6SSong Liu ctx->pos, ctx->seq); 2379b4c625c6SSong Liu mb = page_address(page); 2380b4c625c6SSong Liu offset = le32_to_cpu(mb->meta_size); 2381fc833c2aSJackieLiu write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 2382b4c625c6SSong Liu 2383b4c625c6SSong Liu for (i = sh->disks; i--; ) { 2384b4c625c6SSong Liu struct r5dev *dev = &sh->dev[i]; 2385b4c625c6SSong Liu struct r5l_payload_data_parity *payload; 2386b4c625c6SSong Liu void *addr; 2387b4c625c6SSong Liu 2388b4c625c6SSong Liu if (test_bit(R5_InJournal, &dev->flags)) { 2389b4c625c6SSong Liu payload = (void *)mb + offset; 2390b4c625c6SSong Liu payload->header.type = cpu_to_le16( 2391b4c625c6SSong Liu R5LOG_PAYLOAD_DATA); 23921ad45a9bSJason Yan payload->size = cpu_to_le32(BLOCK_SECTORS); 2393b4c625c6SSong Liu payload->location = cpu_to_le64( 2394b4c625c6SSong Liu raid5_compute_blocknr(sh, i, 0)); 2395b4c625c6SSong Liu addr = kmap_atomic(dev->page); 2396b4c625c6SSong Liu payload->checksum[0] = cpu_to_le32( 2397b4c625c6SSong Liu crc32c_le(log->uuid_checksum, addr, 2398b4c625c6SSong Liu PAGE_SIZE)); 2399b4c625c6SSong Liu kunmap_atomic(addr); 2400b4c625c6SSong Liu sync_page_io(log->rdev, write_pos, PAGE_SIZE, 2401b4c625c6SSong Liu dev->page, REQ_OP_WRITE, 0, false); 2402b4c625c6SSong Liu write_pos = r5l_ring_add(log, write_pos, 2403b4c625c6SSong Liu BLOCK_SECTORS); 2404b4c625c6SSong Liu offset += sizeof(__le32) + 2405b4c625c6SSong Liu sizeof(struct r5l_payload_data_parity); 2406b4c625c6SSong Liu 2407b4c625c6SSong Liu } 2408b4c625c6SSong Liu } 2409b4c625c6SSong Liu mb->meta_size = cpu_to_le32(offset); 24105c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 24115c88f403SSong Liu mb, PAGE_SIZE)); 2412b4c625c6SSong Liu sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 24135a8948f8SJan Kara REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); 2414b4c625c6SSong Liu sh->log_start = ctx->pos; 24153c66abbaSSong Liu list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 24163c66abbaSSong Liu atomic_inc(&log->stripe_in_journal_count); 2417b4c625c6SSong Liu ctx->pos = write_pos; 2418b4c625c6SSong Liu ctx->seq += 1; 24193c66abbaSSong Liu next_checkpoint = sh->log_start; 2420b4c625c6SSong Liu } 24213c66abbaSSong Liu log->next_checkpoint = next_checkpoint; 2422b4c625c6SSong Liu __free_page(page); 2423b4c625c6SSong Liu return 
0; 2424b4c625c6SSong Liu } 2425b4c625c6SSong Liu 2426a85dd7b8SSong Liu static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, 2427a85dd7b8SSong Liu struct r5l_recovery_ctx *ctx) 2428a85dd7b8SSong Liu { 2429a85dd7b8SSong Liu struct mddev *mddev = log->rdev->mddev; 2430a85dd7b8SSong Liu struct r5conf *conf = mddev->private; 2431a85dd7b8SSong Liu struct stripe_head *sh, *next; 2432a85dd7b8SSong Liu 2433a85dd7b8SSong Liu if (ctx->data_only_stripes == 0) 2434a85dd7b8SSong Liu return; 2435a85dd7b8SSong Liu 2436a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; 2437a85dd7b8SSong Liu 2438a85dd7b8SSong Liu list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { 2439a85dd7b8SSong Liu r5c_make_stripe_write_out(sh); 2440a85dd7b8SSong Liu set_bit(STRIPE_HANDLE, &sh->state); 2441a85dd7b8SSong Liu list_del_init(&sh->lru); 2442a85dd7b8SSong Liu raid5_release_stripe(sh); 2443a85dd7b8SSong Liu } 2444a85dd7b8SSong Liu 2445a85dd7b8SSong Liu md_wakeup_thread(conf->mddev->thread); 2446a85dd7b8SSong Liu /* reuse conf->wait_for_quiescent in recovery */ 2447a85dd7b8SSong Liu wait_event(conf->wait_for_quiescent, 2448a85dd7b8SSong Liu atomic_read(&conf->active_stripes) == 0); 2449a85dd7b8SSong Liu 2450a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 2451a85dd7b8SSong Liu } 2452a85dd7b8SSong Liu 2453f6bed0efSShaohua Li static int r5l_recovery_log(struct r5l_log *log) 2454f6bed0efSShaohua Li { 24555aabf7c4SSong Liu struct mddev *mddev = log->rdev->mddev; 2456effe6ee7SSong Liu struct r5l_recovery_ctx *ctx; 24575aabf7c4SSong Liu int ret; 245843b96748SJackieLiu sector_t pos; 2459355810d1SShaohua Li 2460effe6ee7SSong Liu ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 2461effe6ee7SSong Liu if (!ctx) 2462355810d1SShaohua Li return -ENOMEM; 2463355810d1SShaohua Li 2464effe6ee7SSong Liu ctx->pos = log->last_checkpoint; 2465effe6ee7SSong Liu ctx->seq = log->last_cp_seq; 2466effe6ee7SSong Liu INIT_LIST_HEAD(&ctx->cached_list); 2467effe6ee7SSong Liu ctx->meta_page = alloc_page(GFP_KERNEL); 2468effe6ee7SSong Liu 2469effe6ee7SSong Liu if (!ctx->meta_page) { 2470effe6ee7SSong Liu ret = -ENOMEM; 2471effe6ee7SSong Liu goto meta_page; 2472effe6ee7SSong Liu } 2473effe6ee7SSong Liu 2474effe6ee7SSong Liu if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) { 2475effe6ee7SSong Liu ret = -ENOMEM; 2476effe6ee7SSong Liu goto ra_pool; 2477effe6ee7SSong Liu } 2478effe6ee7SSong Liu 2479effe6ee7SSong Liu ret = r5c_recovery_flush_log(log, ctx); 2480355810d1SShaohua Li 2481355810d1SShaohua Li if (ret) 2482effe6ee7SSong Liu goto error; 24835aabf7c4SSong Liu 2484effe6ee7SSong Liu pos = ctx->pos; 2485effe6ee7SSong Liu ctx->seq += 10000; 248643b96748SJackieLiu 2487effe6ee7SSong Liu if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0)) 24885aabf7c4SSong Liu pr_debug("md/raid:%s: starting from clean shutdown\n", 24895aabf7c4SSong Liu mdname(mddev)); 2490a85dd7b8SSong Liu else 249199f17890SColin Ian King pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", 2492effe6ee7SSong Liu mdname(mddev), ctx->data_only_stripes, 2493effe6ee7SSong Liu ctx->data_parity_stripes); 24945aabf7c4SSong Liu 2495effe6ee7SSong Liu if (ctx->data_only_stripes == 0) { 2496effe6ee7SSong Liu log->next_checkpoint = ctx->pos; 2497effe6ee7SSong Liu r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++); 2498effe6ee7SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); 2499effe6ee7SSong Liu } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) { 25005aabf7c4SSong 
Liu pr_err("md/raid:%s: failed to rewrite stripes to journal\n", 25015aabf7c4SSong Liu mdname(mddev)); 2502effe6ee7SSong Liu ret = -EIO; 2503effe6ee7SSong Liu goto error; 25045aabf7c4SSong Liu } 25055aabf7c4SSong Liu 2506effe6ee7SSong Liu log->log_start = ctx->pos; 2507effe6ee7SSong Liu log->seq = ctx->seq; 250843b96748SJackieLiu log->last_checkpoint = pos; 250943b96748SJackieLiu r5l_write_super(log, pos); 2510a85dd7b8SSong Liu 2511effe6ee7SSong Liu r5c_recovery_flush_data_only_stripes(log, ctx); 2512effe6ee7SSong Liu ret = 0; 2513effe6ee7SSong Liu error: 2514effe6ee7SSong Liu r5l_recovery_free_ra_pool(log, ctx); 2515effe6ee7SSong Liu ra_pool: 2516effe6ee7SSong Liu __free_page(ctx->meta_page); 2517effe6ee7SSong Liu meta_page: 2518effe6ee7SSong Liu kfree(ctx); 2519effe6ee7SSong Liu return ret; 2520f6bed0efSShaohua Li } 2521f6bed0efSShaohua Li 2522f6bed0efSShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp) 2523f6bed0efSShaohua Li { 2524f6bed0efSShaohua Li struct mddev *mddev = log->rdev->mddev; 2525f6bed0efSShaohua Li 2526f6bed0efSShaohua Li log->rdev->journal_tail = cp; 25272953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2528f6bed0efSShaohua Li } 2529f6bed0efSShaohua Li 25302c7da14bSSong Liu static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) 25312c7da14bSSong Liu { 2532a72cbf83SSong Liu struct r5conf *conf; 25332c7da14bSSong Liu int ret; 25342c7da14bSSong Liu 2535a72cbf83SSong Liu ret = mddev_lock(mddev); 2536a72cbf83SSong Liu if (ret) 2537a72cbf83SSong Liu return ret; 2538a72cbf83SSong Liu 2539a72cbf83SSong Liu conf = mddev->private; 2540a72cbf83SSong Liu if (!conf || !conf->log) { 2541a72cbf83SSong Liu mddev_unlock(mddev); 25422c7da14bSSong Liu return 0; 2543a72cbf83SSong Liu } 25442c7da14bSSong Liu 25452c7da14bSSong Liu switch (conf->log->r5c_journal_mode) { 25462c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_THROUGH: 25472c7da14bSSong Liu ret = snprintf( 25482c7da14bSSong Liu page, PAGE_SIZE, "[%s] %s\n", 25492c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], 25502c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); 25512c7da14bSSong Liu break; 25522c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_BACK: 25532c7da14bSSong Liu ret = snprintf( 25542c7da14bSSong Liu page, PAGE_SIZE, "%s [%s]\n", 25552c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], 25562c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); 25572c7da14bSSong Liu break; 25582c7da14bSSong Liu default: 25592c7da14bSSong Liu ret = 0; 25602c7da14bSSong Liu } 2561a72cbf83SSong Liu mddev_unlock(mddev); 25622c7da14bSSong Liu return ret; 25632c7da14bSSong Liu } 25642c7da14bSSong Liu 256578e470c2SHeinz Mauelshagen /* 256678e470c2SHeinz Mauelshagen * Set journal cache mode on @mddev (external API initially needed by dm-raid). 256778e470c2SHeinz Mauelshagen * 256878e470c2SHeinz Mauelshagen * @mode as defined in 'enum r5c_journal_mode'. 
256978e470c2SHeinz Mauelshagen  *
257078e470c2SHeinz Mauelshagen  */
257178e470c2SHeinz Mauelshagen int r5c_journal_mode_set(struct mddev *mddev, int mode)
25722c7da14bSSong Liu {
2573b44886c5SSong Liu 	struct r5conf *conf;
2574b44886c5SSong Liu 	int err;
25752c7da14bSSong Liu 
257678e470c2SHeinz Mauelshagen 	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
257778e470c2SHeinz Mauelshagen 	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
25782c7da14bSSong Liu 		return -EINVAL;
25792c7da14bSSong Liu 
2580b44886c5SSong Liu 	err = mddev_lock(mddev);
2581b44886c5SSong Liu 	if (err)
2582b44886c5SSong Liu 		return err;
2583b44886c5SSong Liu 	conf = mddev->private;
2584b44886c5SSong Liu 	if (!conf || !conf->log) {
2585b44886c5SSong Liu 		mddev_unlock(mddev);
2586b44886c5SSong Liu 		return -ENODEV;
2587b44886c5SSong Liu 	}
2588b44886c5SSong Liu 
25892e38a37fSSong Liu 	if (raid5_calc_degraded(conf) > 0 &&
2590b44886c5SSong Liu 	    mode == R5C_JOURNAL_MODE_WRITE_BACK) {
2591b44886c5SSong Liu 		mddev_unlock(mddev);
25922e38a37fSSong Liu 		return -EINVAL;
2593b44886c5SSong Liu 	}
25942e38a37fSSong Liu 
25952c7da14bSSong Liu 	mddev_suspend(mddev);
259678e470c2SHeinz Mauelshagen 	conf->log->r5c_journal_mode = mode;
25972c7da14bSSong Liu 	mddev_resume(mddev);
2598b44886c5SSong Liu 	mddev_unlock(mddev);
25992c7da14bSSong Liu 
26002c7da14bSSong Liu 	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
260178e470c2SHeinz Mauelshagen 		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
260278e470c2SHeinz Mauelshagen 	return 0;
260378e470c2SHeinz Mauelshagen }
260478e470c2SHeinz Mauelshagen EXPORT_SYMBOL(r5c_journal_mode_set);
260578e470c2SHeinz Mauelshagen 
260678e470c2SHeinz Mauelshagen static ssize_t r5c_journal_mode_store(struct mddev *mddev,
260778e470c2SHeinz Mauelshagen 				      const char *page, size_t length)
260878e470c2SHeinz Mauelshagen {
260978e470c2SHeinz Mauelshagen 	int mode = ARRAY_SIZE(r5c_journal_mode_str);
261078e470c2SHeinz Mauelshagen 	size_t len = length;
261178e470c2SHeinz Mauelshagen 
261278e470c2SHeinz Mauelshagen 	if (len < 2)
261378e470c2SHeinz Mauelshagen 		return -EINVAL;
261478e470c2SHeinz Mauelshagen 
261578e470c2SHeinz Mauelshagen 	if (page[len - 1] == '\n')
261678e470c2SHeinz Mauelshagen 		len--;
261778e470c2SHeinz Mauelshagen 
261878e470c2SHeinz Mauelshagen 	while (mode--)
261978e470c2SHeinz Mauelshagen 		if (strlen(r5c_journal_mode_str[mode]) == len &&
262078e470c2SHeinz Mauelshagen 		    !strncmp(page, r5c_journal_mode_str[mode], len))
262178e470c2SHeinz Mauelshagen 			break;
262278e470c2SHeinz Mauelshagen 
262378e470c2SHeinz Mauelshagen 	return r5c_journal_mode_set(mddev, mode) ?: length;
26242c7da14bSSong Liu }
26252c7da14bSSong Liu 
26262c7da14bSSong Liu struct md_sysfs_entry
26272c7da14bSSong Liu r5c_journal_mode = __ATTR(journal_mode, 0644,
26282c7da14bSSong Liu 			  r5c_journal_mode_show, r5c_journal_mode_store);
26292c7da14bSSong Liu 
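/*
 * Example usage (illustrative; the md device name is hypothetical): the
 * journal mode can be switched at runtime through the "journal_mode"
 * attribute defined above, e.g.
 *
 *	cat /sys/block/md0/md/journal_mode	->  [write-through] write-back
 *	echo write-back > /sys/block/md0/md/journal_mode
 *
 * r5c_journal_mode_store() strips one trailing newline and matches the
 * token against r5c_journal_mode_str[], so the string must match exactly.
 */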
26302ded3703SSong Liu /*
26312ded3703SSong Liu  * Try to handle a write operation in caching phase. This function should
26322ded3703SSong Liu  * only be called in write-back mode.
26332ded3703SSong Liu  *
26342ded3703SSong Liu  * If all outstanding writes can be handled in caching phase, returns 0
26352ded3703SSong Liu  * If the writes require write-out phase, calls r5c_make_stripe_write_out()
26362ded3703SSong Liu  * and returns -EAGAIN
26372ded3703SSong Liu  */
26382ded3703SSong Liu int r5c_try_caching_write(struct r5conf *conf,
26392ded3703SSong Liu 			  struct stripe_head *sh,
26402ded3703SSong Liu 			  struct stripe_head_state *s,
26412ded3703SSong Liu 			  int disks)
26422ded3703SSong Liu {
26432ded3703SSong Liu 	struct r5l_log *log = conf->log;
26441e6d690bSSong Liu 	int i;
26451e6d690bSSong Liu 	struct r5dev *dev;
26461e6d690bSSong Liu 	int to_cache = 0;
264703b047f4SSong Liu 	void **pslot;
264803b047f4SSong Liu 	sector_t tree_index;
264903b047f4SSong Liu 	int ret;
265003b047f4SSong Liu 	uintptr_t refcount;
26512ded3703SSong Liu 
26522ded3703SSong Liu 	BUG_ON(!r5c_is_writeback(log));
26532ded3703SSong Liu 
26541e6d690bSSong Liu 	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
26551e6d690bSSong Liu 		/*
26561e6d690bSSong Liu 		 * There are two different scenarios here:
26571e6d690bSSong Liu 		 *  1. The stripe has some data cached, and it is sent to
26581e6d690bSSong Liu 		 *     write-out phase for reclaim
26591e6d690bSSong Liu 		 *  2. The stripe is clean, and this is the first write
26601e6d690bSSong Liu 		 *
26611e6d690bSSong Liu 		 * For 1, return -EAGAIN, so we continue with
26621e6d690bSSong Liu 		 * handle_stripe_dirtying().
26631e6d690bSSong Liu 		 *
26641e6d690bSSong Liu 		 * For 2, set STRIPE_R5C_CACHING and continue with caching
26651e6d690bSSong Liu 		 * write.
26661e6d690bSSong Liu 		 */
26671e6d690bSSong Liu 
26681e6d690bSSong Liu 		/* case 1: anything injournal or anything in written */
26691e6d690bSSong Liu 		if (s->injournal > 0 || s->written > 0)
26701e6d690bSSong Liu 			return -EAGAIN;
26711e6d690bSSong Liu 		/* case 2 */
26721e6d690bSSong Liu 		set_bit(STRIPE_R5C_CACHING, &sh->state);
26731e6d690bSSong Liu 	}
26741e6d690bSSong Liu 
26752e38a37fSSong Liu 	/*
26762e38a37fSSong Liu 	 * When run in degraded mode, the array is set to write-through mode.
26772e38a37fSSong Liu 	 * This check helps drain pending writes safely in the transition to
26782e38a37fSSong Liu 	 * write-through mode.
26795ddf0440SSong Liu 	 *
26805ddf0440SSong Liu 	 * When a stripe is syncing, the write is also handled in write
26822e38a37fSSong Liu */ 26835ddf0440SSong Liu if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { 26842e38a37fSSong Liu r5c_make_stripe_write_out(sh); 26852e38a37fSSong Liu return -EAGAIN; 26862e38a37fSSong Liu } 26872e38a37fSSong Liu 26881e6d690bSSong Liu for (i = disks; i--; ) { 26891e6d690bSSong Liu dev = &sh->dev[i]; 26901e6d690bSSong Liu /* if non-overwrite, use writing-out phase */ 26911e6d690bSSong Liu if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) && 26921e6d690bSSong Liu !test_bit(R5_InJournal, &dev->flags)) { 26932ded3703SSong Liu r5c_make_stripe_write_out(sh); 26942ded3703SSong Liu return -EAGAIN; 26952ded3703SSong Liu } 26961e6d690bSSong Liu } 26971e6d690bSSong Liu 269803b047f4SSong Liu /* if the stripe is not counted in big_stripe_tree, add it now */ 269903b047f4SSong Liu if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && 270003b047f4SSong Liu !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { 270103b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector); 270203b047f4SSong Liu spin_lock(&log->tree_lock); 270303b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree, 270403b047f4SSong Liu tree_index); 270503b047f4SSong Liu if (pslot) { 270603b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected( 270703b047f4SSong Liu pslot, &log->tree_lock) >> 270803b047f4SSong Liu R5C_RADIX_COUNT_SHIFT; 270903b047f4SSong Liu radix_tree_replace_slot( 271003b047f4SSong Liu &log->big_stripe_tree, pslot, 271103b047f4SSong Liu (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT)); 271203b047f4SSong Liu } else { 271303b047f4SSong Liu /* 271403b047f4SSong Liu * this radix_tree_insert can fail safely, so no 271503b047f4SSong Liu * need to call radix_tree_preload() 271603b047f4SSong Liu */ 271703b047f4SSong Liu ret = radix_tree_insert( 271803b047f4SSong Liu &log->big_stripe_tree, tree_index, 271903b047f4SSong Liu (void *)(1 << R5C_RADIX_COUNT_SHIFT)); 272003b047f4SSong Liu if (ret) { 272103b047f4SSong Liu spin_unlock(&log->tree_lock); 272203b047f4SSong Liu r5c_make_stripe_write_out(sh); 272303b047f4SSong Liu return -EAGAIN; 272403b047f4SSong Liu } 272503b047f4SSong Liu } 272603b047f4SSong Liu spin_unlock(&log->tree_lock); 272703b047f4SSong Liu 272803b047f4SSong Liu /* 272903b047f4SSong Liu * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is 273003b047f4SSong Liu * counted in the radix tree 273103b047f4SSong Liu */ 273203b047f4SSong Liu set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); 273303b047f4SSong Liu atomic_inc(&conf->r5c_cached_partial_stripes); 273403b047f4SSong Liu } 273503b047f4SSong Liu 27361e6d690bSSong Liu for (i = disks; i--; ) { 27371e6d690bSSong Liu dev = &sh->dev[i]; 27381e6d690bSSong Liu if (dev->towrite) { 27391e6d690bSSong Liu set_bit(R5_Wantwrite, &dev->flags); 27401e6d690bSSong Liu set_bit(R5_Wantdrain, &dev->flags); 27411e6d690bSSong Liu set_bit(R5_LOCKED, &dev->flags); 27421e6d690bSSong Liu to_cache++; 27431e6d690bSSong Liu } 27441e6d690bSSong Liu } 27451e6d690bSSong Liu 27461e6d690bSSong Liu if (to_cache) { 27471e6d690bSSong Liu set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 27481e6d690bSSong Liu /* 27491e6d690bSSong Liu * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data() 27501e6d690bSSong Liu * in ops_run_io(). 
STRIPE_LOG_TRAPPED will be cleared in
27511e6d690bSSong Liu 		 * r5c_handle_data_cached()
27521e6d690bSSong Liu 		 */
27531e6d690bSSong Liu 		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
27541e6d690bSSong Liu 	}
27551e6d690bSSong Liu 
27561e6d690bSSong Liu 	return 0;
27571e6d690bSSong Liu }
27581e6d690bSSong Liu 
27591e6d690bSSong Liu /*
27601e6d690bSSong Liu  * free extra pages (orig_page) we allocated for prexor
27611e6d690bSSong Liu  */
27621e6d690bSSong Liu void r5c_release_extra_page(struct stripe_head *sh)
27631e6d690bSSong Liu {
2764d7bd398eSSong Liu 	struct r5conf *conf = sh->raid_conf;
27651e6d690bSSong Liu 	int i;
2766d7bd398eSSong Liu 	bool using_disk_info_extra_page;
2767d7bd398eSSong Liu 
2768d7bd398eSSong Liu 	using_disk_info_extra_page =
2769d7bd398eSSong Liu 		sh->dev[0].orig_page == conf->disks[0].extra_page;
27701e6d690bSSong Liu 
27711e6d690bSSong Liu 	for (i = sh->disks; i--; )
27721e6d690bSSong Liu 		if (sh->dev[i].page != sh->dev[i].orig_page) {
27731e6d690bSSong Liu 			struct page *p = sh->dev[i].orig_page;
27741e6d690bSSong Liu 
27751e6d690bSSong Liu 			sh->dev[i].orig_page = sh->dev[i].page;
277686aa1397SSong Liu 			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
277786aa1397SSong Liu 
2778d7bd398eSSong Liu 			if (!using_disk_info_extra_page)
27791e6d690bSSong Liu 				put_page(p);
27801e6d690bSSong Liu 		}
2781d7bd398eSSong Liu 
2782d7bd398eSSong Liu 	if (using_disk_info_extra_page) {
2783d7bd398eSSong Liu 		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2784d7bd398eSSong Liu 		md_wakeup_thread(conf->mddev->thread);
2785d7bd398eSSong Liu 	}
2786d7bd398eSSong Liu }
2787d7bd398eSSong Liu 
2788d7bd398eSSong Liu void r5c_use_extra_page(struct stripe_head *sh)
2789d7bd398eSSong Liu {
2790d7bd398eSSong Liu 	struct r5conf *conf = sh->raid_conf;
2791d7bd398eSSong Liu 	int i;
2792d7bd398eSSong Liu 	struct r5dev *dev;
2793d7bd398eSSong Liu 
2794d7bd398eSSong Liu 	for (i = sh->disks; i--; ) {
2795d7bd398eSSong Liu 		dev = &sh->dev[i];
2796d7bd398eSSong Liu 		if (dev->orig_page != dev->page)
2797d7bd398eSSong Liu 			put_page(dev->orig_page);
2798d7bd398eSSong Liu 		dev->orig_page = conf->disks[i].extra_page;
2799d7bd398eSSong Liu 	}
28001e6d690bSSong Liu }
28012ded3703SSong Liu 
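/*
 * A minimal sketch (these helpers are illustrative, not used by the
 * driver) of the trick big_stripe_tree relies on in the functions around
 * here: the radix tree slot stores a bare reference count rather than a
 * pointer, shifted left by R5C_RADIX_COUNT_SHIFT so the low bits the
 * radix tree reserves for its own entry tagging stay clear.
 */
static inline void *r5c_example_pack_count(uintptr_t count)
{
	/* encode: a count of 3 is stored as the slot value (3 << shift) */
	return (void *)(count << R5C_RADIX_COUNT_SHIFT);
}

static inline uintptr_t r5c_example_unpack_count(void *slot)
{
	/* decode: recover the plain count from a stored slot value */
	return (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
}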
28022ded3703SSong Liu /*
28032ded3703SSong Liu  * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
28042ded3703SSong Liu  * stripe is committed to RAID disks.
28052ded3703SSong Liu  */
28062ded3703SSong Liu void r5c_finish_stripe_write_out(struct r5conf *conf,
28072ded3703SSong Liu 				 struct stripe_head *sh,
28082ded3703SSong Liu 				 struct stripe_head_state *s)
28092ded3703SSong Liu {
281003b047f4SSong Liu 	struct r5l_log *log = conf->log;
28111e6d690bSSong Liu 	int i;
28121e6d690bSSong Liu 	int do_wakeup = 0;
281303b047f4SSong Liu 	sector_t tree_index;
281403b047f4SSong Liu 	void **pslot;
281503b047f4SSong Liu 	uintptr_t refcount;
28161e6d690bSSong Liu 
281703b047f4SSong Liu 	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
28182ded3703SSong Liu 		return;
28192ded3703SSong Liu 
28202ded3703SSong Liu 	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
28212ded3703SSong Liu 	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
28222ded3703SSong Liu 
282303b047f4SSong Liu 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
28242ded3703SSong Liu 		return;
28251e6d690bSSong Liu 
28261e6d690bSSong Liu 	for (i = sh->disks; i--; ) {
28271e6d690bSSong Liu 		clear_bit(R5_InJournal, &sh->dev[i].flags);
28281e6d690bSSong Liu 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
28291e6d690bSSong Liu 			do_wakeup = 1;
28301e6d690bSSong Liu 	}
28311e6d690bSSong Liu 
28321e6d690bSSong Liu 	/*
28331e6d690bSSong Liu 	 * analyse_stripe() runs before r5c_finish_stripe_write_out(). We
28341e6d690bSSong Liu 	 * updated R5_InJournal above, so we also update s->injournal here.
28351e6d690bSSong Liu 	 */
28361e6d690bSSong Liu 	s->injournal = 0;
28371e6d690bSSong Liu 
28381e6d690bSSong Liu 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
28391e6d690bSSong Liu 		if (atomic_dec_and_test(&conf->pending_full_writes))
28401e6d690bSSong Liu 			md_wakeup_thread(conf->mddev->thread);
28411e6d690bSSong Liu 
28421e6d690bSSong Liu 	if (do_wakeup)
28431e6d690bSSong Liu 		wake_up(&conf->wait_for_overlap);
2844a39f7afdSSong Liu 
284503b047f4SSong Liu 	spin_lock_irq(&log->stripe_in_journal_lock);
2846a39f7afdSSong Liu 	list_del_init(&sh->r5c);
284703b047f4SSong Liu 	spin_unlock_irq(&log->stripe_in_journal_lock);
2848a39f7afdSSong Liu 	sh->log_start = MaxSector;
284903b047f4SSong Liu 
285003b047f4SSong Liu 	atomic_dec(&log->stripe_in_journal_count);
285103b047f4SSong Liu 	r5c_update_log_state(log);
285203b047f4SSong Liu 
285303b047f4SSong Liu 	/* stop counting this stripe in big_stripe_tree */
285403b047f4SSong Liu 	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
285503b047f4SSong Liu 	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
285603b047f4SSong Liu 		tree_index = r5c_tree_index(conf, sh->sector);
285703b047f4SSong Liu 		spin_lock(&log->tree_lock);
285803b047f4SSong Liu 		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
285903b047f4SSong Liu 					       tree_index);
286003b047f4SSong Liu 		BUG_ON(pslot == NULL);
286103b047f4SSong Liu 		refcount = (uintptr_t)radix_tree_deref_slot_protected(
286203b047f4SSong Liu 			pslot, &log->tree_lock) >>
286303b047f4SSong Liu 			R5C_RADIX_COUNT_SHIFT;
286403b047f4SSong Liu 		if (refcount == 1)
286503b047f4SSong Liu 			radix_tree_delete(&log->big_stripe_tree, tree_index);
286603b047f4SSong Liu 		else
286703b047f4SSong Liu 			radix_tree_replace_slot(
286803b047f4SSong Liu 				&log->big_stripe_tree, pslot,
286903b047f4SSong Liu 				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
287003b047f4SSong Liu 		spin_unlock(&log->tree_lock);
287103b047f4SSong Liu 	}
287203b047f4SSong Liu 
287303b047f4SSong Liu 	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
287403b047f4SSong Liu 		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2875e33fbb9cSShaohua Li 		atomic_dec(&conf->r5c_flushing_partial_stripes);
287603b047f4SSong Liu 		atomic_dec(&conf->r5c_cached_partial_stripes);
287703b047f4SSong Liu 	}
287803b047f4SSong Liu 
287903b047f4SSong Liu 	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
288003b047f4SSong Liu 		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2881e33fbb9cSShaohua Li 		atomic_dec(&conf->r5c_flushing_full_stripes);
288203b047f4SSong Liu 		atomic_dec(&conf->r5c_cached_full_stripes);
288303b047f4SSong Liu 	}
2884ea17481fSSong Liu 
2885ea17481fSSong Liu 	r5l_append_flush_payload(log, sh->sector);
28865ddf0440SSong Liu 	/* stripe is flushed to raid disks, we can do resync now */
28875ddf0440SSong Liu 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
28885ddf0440SSong Liu 		set_bit(STRIPE_HANDLE, &sh->state);
28891e6d690bSSong Liu }
28901e6d690bSSong Liu 
2891ff875738SArtur Paszkiewicz int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
28921e6d690bSSong Liu {
2893a39f7afdSSong Liu 	struct r5conf *conf = sh->raid_conf;
28941e6d690bSSong Liu 	int pages = 0;
28951e6d690bSSong Liu 	int reserve;
28961e6d690bSSong Liu 	int i;
28971e6d690bSSong Liu 	int ret = 0;
28981e6d690bSSong Liu 
28991e6d690bSSong Liu 	BUG_ON(!log);
29001e6d690bSSong Liu 
29011e6d690bSSong Liu 	for (i = 0; i < sh->disks; i++) {
29021e6d690bSSong Liu 		void *addr;
29031e6d690bSSong Liu 
29041e6d690bSSong Liu 		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
29051e6d690bSSong Liu 			continue;
29061e6d690bSSong Liu 		addr = kmap_atomic(sh->dev[i].page);
29071e6d690bSSong Liu 		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
29081e6d690bSSong Liu 						    addr, PAGE_SIZE);
29091e6d690bSSong Liu 		kunmap_atomic(addr);
29101e6d690bSSong Liu 		pages++;
29111e6d690bSSong Liu 	}
29121e6d690bSSong Liu 	WARN_ON(pages == 0);
29131e6d690bSSong Liu 
29141e6d690bSSong Liu 	/*
29151e6d690bSSong Liu 	 * The stripe must enter state machine again to call endio, so
29161e6d690bSSong Liu 	 * don't delay.
29171e6d690bSSong Liu 	 */
29181e6d690bSSong Liu 	clear_bit(STRIPE_DELAYED, &sh->state);
29191e6d690bSSong Liu 	atomic_inc(&sh->count);
29201e6d690bSSong Liu 
29211e6d690bSSong Liu 	mutex_lock(&log->io_mutex);
29221e6d690bSSong Liu 	/* meta + data */
29231e6d690bSSong Liu 	reserve = (1 + pages) << (PAGE_SHIFT - 9);
29241e6d690bSSong Liu 
2925a39f7afdSSong Liu 	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2926a39f7afdSSong Liu 	    sh->log_start == MaxSector)
2927a39f7afdSSong Liu 		r5l_add_no_space_stripe(log, sh);
2928a39f7afdSSong Liu 	else if (!r5l_has_free_space(log, reserve)) {
2929a39f7afdSSong Liu 		if (sh->log_start == log->last_checkpoint)
2930a39f7afdSSong Liu 			BUG();
2931a39f7afdSSong Liu 		else
2932a39f7afdSSong Liu 			r5l_add_no_space_stripe(log, sh);
29331e6d690bSSong Liu 	} else {
29341e6d690bSSong Liu 		ret = r5l_log_stripe(log, sh, pages, 0);
29351e6d690bSSong Liu 		if (ret) {
29361e6d690bSSong Liu 			spin_lock_irq(&log->io_list_lock);
29371e6d690bSSong Liu 			list_add_tail(&sh->log_list, &log->no_mem_stripes);
29381e6d690bSSong Liu 			spin_unlock_irq(&log->io_list_lock);
29391e6d690bSSong Liu 		}
29401e6d690bSSong Liu 	}
29411e6d690bSSong Liu 
29421e6d690bSSong Liu 	mutex_unlock(&log->io_mutex);
29431e6d690bSSong Liu 	return 0;
2944f6bed0efSShaohua Li }
2945f6bed0efSShaohua Li 
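/*
 * Worked example for the reservation in r5c_cache_data() above (assumes
 * 4KiB pages, i.e. PAGE_SHIFT == 12): space is counted in 512-byte
 * sectors, one block for the meta page plus one per data page. With
 * pages == 3, reserve = (1 + 3) << (12 - 9) = 32 sectors, i.e. 16KiB of
 * journal space.
 */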
294603b047f4SSong Liu /* check whether this big stripe is in the write-back cache. */
294703b047f4SSong Liu bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
294803b047f4SSong Liu {
294903b047f4SSong Liu 	struct r5l_log *log = conf->log;
295003b047f4SSong Liu 	sector_t tree_index;
295103b047f4SSong Liu 	void *slot;
295203b047f4SSong Liu 
295303b047f4SSong Liu 	if (!log)
295403b047f4SSong Liu 		return false;
295503b047f4SSong Liu 
295603b047f4SSong Liu 	WARN_ON_ONCE(!rcu_read_lock_held());
295703b047f4SSong Liu 	tree_index = r5c_tree_index(conf, sect);
295803b047f4SSong Liu 	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
295903b047f4SSong Liu 	return slot != NULL;
296003b047f4SSong Liu }
296103b047f4SSong Liu 
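/*
 * A minimal usage sketch (the wrapper is hypothetical): lookups in
 * big_stripe_tree are lockless, so callers must hold the RCU read lock,
 * matching the WARN_ON_ONCE() in r5c_big_stripe_cached() above.
 */
static inline bool r5c_example_big_stripe_lookup(struct r5conf *conf,
						 sector_t sect)
{
	bool cached;

	rcu_read_lock();
	cached = r5c_big_stripe_cached(conf, sect);
	rcu_read_unlock();
	return cached;
}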
2962f6bed0efSShaohua Li static int r5l_load_log(struct r5l_log *log)
2963f6bed0efSShaohua Li {
2964f6bed0efSShaohua Li 	struct md_rdev *rdev = log->rdev;
2965f6bed0efSShaohua Li 	struct page *page;
2966f6bed0efSShaohua Li 	struct r5l_meta_block *mb;
2967f6bed0efSShaohua Li 	sector_t cp = log->rdev->journal_tail;
2968f6bed0efSShaohua Li 	u32 stored_crc, expected_crc;
2969f6bed0efSShaohua Li 	bool create_super = false;
2970d30dfeb9SJackieLiu 	int ret = 0;
2971f6bed0efSShaohua Li 
2972f6bed0efSShaohua Li 	/* Make sure it's valid */
2973f6bed0efSShaohua Li 	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2974f6bed0efSShaohua Li 		cp = 0;
2975f6bed0efSShaohua Li 	page = alloc_page(GFP_KERNEL);
2976f6bed0efSShaohua Li 	if (!page)
2977f6bed0efSShaohua Li 		return -ENOMEM;
2978f6bed0efSShaohua Li 
2979796a5cf0SMike Christie 	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
2980f6bed0efSShaohua Li 		ret = -EIO;
2981f6bed0efSShaohua Li 		goto ioerr;
2982f6bed0efSShaohua Li 	}
2983f6bed0efSShaohua Li 	mb = page_address(page);
2984f6bed0efSShaohua Li 
2985f6bed0efSShaohua Li 	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2986f6bed0efSShaohua Li 	    mb->version != R5LOG_VERSION) {
2987f6bed0efSShaohua Li 		create_super = true;
2988f6bed0efSShaohua Li 		goto create;
2989f6bed0efSShaohua Li 	}
2990f6bed0efSShaohua Li 	stored_crc = le32_to_cpu(mb->checksum);
2991f6bed0efSShaohua Li 	mb->checksum = 0;
29925cb2fbd6SShaohua Li 	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2993f6bed0efSShaohua Li 	if (stored_crc != expected_crc) {
2994f6bed0efSShaohua Li 		create_super = true;
2995f6bed0efSShaohua Li 		goto create;
2996f6bed0efSShaohua Li 	}
2997f6bed0efSShaohua Li 	if (le64_to_cpu(mb->position) != cp) {
2998f6bed0efSShaohua Li 		create_super = true;
2999f6bed0efSShaohua Li 		goto create;
3000f6bed0efSShaohua Li 	}
3001f6bed0efSShaohua Li create:
3002f6bed0efSShaohua Li 	if (create_super) {
3003f6bed0efSShaohua Li 		log->last_cp_seq = prandom_u32();
3004f6bed0efSShaohua Li 		cp = 0;
300556056c2eSZhengyuan Liu 		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
3006f6bed0efSShaohua Li 		/*
3007f6bed0efSShaohua Li 		 * Make sure super points to the correct address. The log
3008f6bed0efSShaohua Li 		 * might have data very soon. If the super block doesn't have
3009f6bed0efSShaohua Li 		 * the correct log tail address, recovery can't find the log
3010f6bed0efSShaohua Li 		 */
3011f6bed0efSShaohua Li 		r5l_write_super(log, cp);
3012f6bed0efSShaohua Li 	} else
3013f6bed0efSShaohua Li 		log->last_cp_seq = le64_to_cpu(mb->seq);
3014f6bed0efSShaohua Li 
3015f6bed0efSShaohua Li 	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
30160576b1c6SShaohua Li 	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
30170576b1c6SShaohua Li 	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
30180576b1c6SShaohua Li 		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3019f6bed0efSShaohua Li 	log->last_checkpoint = cp;
3020f6bed0efSShaohua Li 
3021f6bed0efSShaohua Li 	__free_page(page);
3022f6bed0efSShaohua Li 
3023d30dfeb9SJackieLiu 	if (create_super) {
3024d30dfeb9SJackieLiu 		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3025d30dfeb9SJackieLiu 		log->seq = log->last_cp_seq + 1;
3026d30dfeb9SJackieLiu 		log->next_checkpoint = cp;
3027d30dfeb9SJackieLiu 	} else
30283d7e7e1dSZhengyuan Liu 		ret = r5l_recovery_log(log);
3029d30dfeb9SJackieLiu 
30303d7e7e1dSZhengyuan Liu 	r5c_update_log_state(log);
30313d7e7e1dSZhengyuan Liu 	return ret;
3032f6bed0efSShaohua Li ioerr:
3033f6bed0efSShaohua Li 	__free_page(page);
3034f6bed0efSShaohua Li 	return ret;
3035f6bed0efSShaohua Li }
3036f6bed0efSShaohua Li 
303770d466f7SSong Liu void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
30382e38a37fSSong Liu {
30392e38a37fSSong Liu 	struct r5conf *conf = mddev->private;
30402e38a37fSSong Liu 	struct r5l_log *log = conf->log;
30412e38a37fSSong Liu 
30422e38a37fSSong Liu 	if (!log)
30432e38a37fSSong Liu 		return;
30442e38a37fSSong Liu 
304570d466f7SSong Liu 	if ((raid5_calc_degraded(conf) > 0 ||
304670d466f7SSong Liu 	     test_bit(Journal, &rdev->flags)) &&
30472e38a37fSSong Liu 	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
30482e38a37fSSong Liu 		schedule_work(&log->disable_writeback_work);
30492e38a37fSSong Liu }
30502e38a37fSSong Liu 
3051f6bed0efSShaohua Li int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3052f6bed0efSShaohua Li {
3053c888a8f9SJens Axboe 	struct request_queue *q = bdev_get_queue(rdev->bdev);
3054f6bed0efSShaohua Li 	struct r5l_log *log;
3055ff875738SArtur Paszkiewicz 	char b[BDEVNAME_SIZE];
3056ff875738SArtur Paszkiewicz 
3057ff875738SArtur Paszkiewicz 	pr_debug("md/raid:%s: using device %s as journal\n",
3058ff875738SArtur Paszkiewicz 		 mdname(conf->mddev), bdevname(rdev->bdev, b));
3059f6bed0efSShaohua Li 
3060f6bed0efSShaohua Li 	if (PAGE_SIZE != 4096)
3061f6bed0efSShaohua Li 		return -EINVAL;
3062c757ec95SSong Liu 
3063c757ec95SSong Liu 	/*
3064c757ec95SSong Liu 	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3065c757ec95SSong Liu 	 * raid_disks r5l_payload_data_parity.
3066c757ec95SSong Liu 	 *
3067c757ec95SSong Liu 	 * The write journal and cache do not work for very big arrays
3068c757ec95SSong Liu 	 * (raid_disks > 203)
3069c757ec95SSong Liu 	 */
3070c757ec95SSong Liu 	if (sizeof(struct r5l_meta_block) +
3071c757ec95SSong Liu 	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3072c757ec95SSong Liu 	     conf->raid_disks) > PAGE_SIZE) {
3073c757ec95SSong Liu 		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3074c757ec95SSong Liu 		       mdname(conf->mddev), conf->raid_disks);
3075c757ec95SSong Liu 		return -EINVAL;
3076c757ec95SSong Liu 	}
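/*
 * Worked example for the check above (sizes assume the current on-disk
 * layout, where struct r5l_meta_block is 32 bytes and each disk costs
 * sizeof(struct r5l_payload_data_parity) + sizeof(__le32) = 20 bytes):
 * with 4KiB pages the meta block can describe at most
 * (4096 - 32) / 20 = 203 disks, hence the raid_disks > 203 limit.
 */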
3077c757ec95SSong Liu 
3078f6bed0efSShaohua Li 	log = kzalloc(sizeof(*log), GFP_KERNEL);
3079f6bed0efSShaohua Li 	if (!log)
3080f6bed0efSShaohua Li 		return -ENOMEM;
3081f6bed0efSShaohua Li 	log->rdev = rdev;
3082f6bed0efSShaohua Li 
3083c888a8f9SJens Axboe 	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
308456fef7c6SChristoph Hellwig 
30855cb2fbd6SShaohua Li 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3086f6bed0efSShaohua Li 				       sizeof(rdev->mddev->uuid));
3087f6bed0efSShaohua Li 
3088f6bed0efSShaohua Li 	mutex_init(&log->io_mutex);
3089f6bed0efSShaohua Li 
3090f6bed0efSShaohua Li 	spin_lock_init(&log->io_list_lock);
3091f6bed0efSShaohua Li 	INIT_LIST_HEAD(&log->running_ios);
30920576b1c6SShaohua Li 	INIT_LIST_HEAD(&log->io_end_ios);
3093a8c34f91SShaohua Li 	INIT_LIST_HEAD(&log->flushing_ios);
309404732f74SChristoph Hellwig 	INIT_LIST_HEAD(&log->finished_ios);
30953a83f467SMing Lei 	bio_init(&log->flush_bio, NULL, 0);
3096f6bed0efSShaohua Li 
3097f6bed0efSShaohua Li 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3098f6bed0efSShaohua Li 	if (!log->io_kc)
3099f6bed0efSShaohua Li 		goto io_kc;
3100f6bed0efSShaohua Li 
31015036c390SChristoph Hellwig 	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
31025036c390SChristoph Hellwig 	if (!log->io_pool)
31035036c390SChristoph Hellwig 		goto io_pool;
31045036c390SChristoph Hellwig 
3105011067b0SNeilBrown 	log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3106c38d29b3SChristoph Hellwig 	if (!log->bs)
3107c38d29b3SChristoph Hellwig 		goto io_bs;
3108c38d29b3SChristoph Hellwig 
3109e8deb638SChristoph Hellwig 	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
3110e8deb638SChristoph Hellwig 	if (!log->meta_pool)
3111e8deb638SChristoph Hellwig 		goto out_mempool;
3112e8deb638SChristoph Hellwig 
311303b047f4SSong Liu 	spin_lock_init(&log->tree_lock);
311403b047f4SSong Liu 	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
311503b047f4SSong Liu 
31160576b1c6SShaohua Li 	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
31170576b1c6SShaohua Li 						 log->rdev->mddev, "reclaim");
31180576b1c6SShaohua Li 	if (!log->reclaim_thread)
31190576b1c6SShaohua Li 		goto reclaim_thread;
3120a39f7afdSSong Liu 	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3121a39f7afdSSong Liu 
31220fd22b45SShaohua Li 	init_waitqueue_head(&log->iounit_wait);
31230576b1c6SShaohua Li 
31245036c390SChristoph Hellwig 	INIT_LIST_HEAD(&log->no_mem_stripes);
31255036c390SChristoph Hellwig 
3126f6bed0efSShaohua Li 	INIT_LIST_HEAD(&log->no_space_stripes);
3127f6bed0efSShaohua Li 	spin_lock_init(&log->no_space_stripes_lock);
3128f6bed0efSShaohua Li 
31293bddb7f8SSong Liu 	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
31302e38a37fSSong Liu 	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
31313bddb7f8SSong Liu 
31322ded3703SSong Liu 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3133a39f7afdSSong Liu 	
INIT_LIST_HEAD(&log->stripe_in_journal_list); 3134a39f7afdSSong Liu spin_lock_init(&log->stripe_in_journal_lock); 3135a39f7afdSSong Liu atomic_set(&log->stripe_in_journal_count, 0); 31362ded3703SSong Liu 3137d2250f10SSong Liu rcu_assign_pointer(conf->log, log); 3138d2250f10SSong Liu 3139f6bed0efSShaohua Li if (r5l_load_log(log)) 3140f6bed0efSShaohua Li goto error; 3141f6bed0efSShaohua Li 3142a62ab49eSShaohua Li set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 3143f6bed0efSShaohua Li return 0; 3144e8deb638SChristoph Hellwig 3145f6bed0efSShaohua Li error: 3146d2250f10SSong Liu rcu_assign_pointer(conf->log, NULL); 31470576b1c6SShaohua Li md_unregister_thread(&log->reclaim_thread); 31480576b1c6SShaohua Li reclaim_thread: 3149e8deb638SChristoph Hellwig mempool_destroy(log->meta_pool); 3150e8deb638SChristoph Hellwig out_mempool: 3151c38d29b3SChristoph Hellwig bioset_free(log->bs); 3152c38d29b3SChristoph Hellwig io_bs: 31535036c390SChristoph Hellwig mempool_destroy(log->io_pool); 31545036c390SChristoph Hellwig io_pool: 3155f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc); 3156f6bed0efSShaohua Li io_kc: 3157f6bed0efSShaohua Li kfree(log); 3158f6bed0efSShaohua Li return -EINVAL; 3159f6bed0efSShaohua Li } 3160f6bed0efSShaohua Li 3161ff875738SArtur Paszkiewicz void r5l_exit_log(struct r5conf *conf) 3162f6bed0efSShaohua Li { 3163ff875738SArtur Paszkiewicz struct r5l_log *log = conf->log; 3164ff875738SArtur Paszkiewicz 3165ff875738SArtur Paszkiewicz conf->log = NULL; 3166ff875738SArtur Paszkiewicz synchronize_rcu(); 3167ff875738SArtur Paszkiewicz 31682e38a37fSSong Liu flush_work(&log->disable_writeback_work); 31690576b1c6SShaohua Li md_unregister_thread(&log->reclaim_thread); 3170e8deb638SChristoph Hellwig mempool_destroy(log->meta_pool); 3171c38d29b3SChristoph Hellwig bioset_free(log->bs); 31725036c390SChristoph Hellwig mempool_destroy(log->io_pool); 3173f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc); 3174f6bed0efSShaohua Li kfree(log); 3175f6bed0efSShaohua Li } 3176
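/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * RCU pattern r5l_exit_log() follows: readers dereference conf->log under
 * rcu_read_lock(), so teardown first unpublishes the pointer and then
 * waits a full grace period before freeing what readers might still hold.
 */
static inline void r5l_example_rcu_teardown(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	conf->log = NULL;	/* unpublish: new readers see NULL */
	synchronize_rcu();	/* wait for in-flight readers to finish */
	kfree(log);		/* nothing can still reference it now */
}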