// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write through mode, reclaim runs whenever log->max_free_space of log
 * space has been consumed. This keeps recovery from having to scan too
 * long a section of the log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
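
/*
 * Illustrative sketch (not part of this file): given the two macros above,
 * log init code would derive max_free_space roughly as
 *
 *	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
 *	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
 *		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
 *
 * i.e. min(device_size / 4, 10GiB expressed in sectors).
 */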

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but keep a few more
 * available so we don't run too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

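/*
 * Illustrative sketch (hypothetical, not in this file): routing a write
 * according to the state machine above would look like
 *
 *	if (!r5c_is_writeback(log) ||
 *	    !test_bit(STRIPE_R5C_CACHING, &sh->state)) {
 *		// writing-out phase: compute parity, log, then raid disks
 *	} else {
 *		// caching phase: log the data and complete the bio
 *	}
 *
 * The driver tests the STRIPE_R5C_CACHING bit directly at each site.
 */
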
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space drops
					 * below this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread __rcu *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.,
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. This count is tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be
 * 2'b00, so it is necessary to left shift the counter by 2 bits before
 * using it as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_div(sect, conf->chunk_sectors);
	return sect;
}
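
/*
 * Illustrative sketch (hypothetical helpers, not in this file): because the
 * radix tree item pointer doubles as a counter, a count of N is stored as
 * the pointer (void *)(N << R5C_RADIX_COUNT_SHIFT), e.g.
 *
 *	static void *r5c_count_to_item(unsigned long count)
 *	{
 *		return (void *)(count << R5C_RADIX_COUNT_SHIFT);
 *	}
 *
 *	static unsigned long r5c_item_to_count(void *item)
 *	{
 *		return (unsigned long)item >> R5C_RADIX_COUNT_SHIFT;
 *	}
 *
 * keeping the low 2 bits clear, as the radix tree requires.
 */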

/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. The io_unit is written to the log disk with a normal write; as we
 * always flush the log disk first and then start moving data to the raid
 * disks, there is no requirement to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * not accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
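
/*
 * Worked example (assumed numbers, for illustration only): with
 * device_size = 1000 sectors, last_checkpoint = 900 and log_start = 100,
 * r5l_ring_distance(log, 900, 100) returns 100 + 1000 - 900 = 200 used
 * sectors, and r5l_ring_add(log, 900, 150) wraps around to sector 50.
 */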

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
		wbi2 = r5_next_bio(conf, wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					   RAID5_STRIPE_SECTORS(conf),
					   !test_bit(STRIPE_DEGRADED, &sh->state),
					   0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 * - stripe cache pressure high:
	 *	total_cached > 3/4 min_nr_stripes ||
	 *	empty_inactive_list_nr > 0
	 * - stripe cache pressure moderate:
	 *	total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
		r5l_wake_reclaim(log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that occupy log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *    stripes ALREADY in journal can be flushed w/o pending writes;
 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *    can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that have passed 1. So the total
 * journal space required to flush all cached stripes (in pages) is:
 *
 *	(stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks + 1)
 * or
 *	(stripe_in_journal_count) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
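
/*
 * Worked example (assumed numbers, for illustration only): for a 6-disk
 * RAID6 (raid_disks = 6, max_degraded = 2) with group_cnt = 0 and
 * stripe_in_journal_count = 100, the reserve above is
 * BLOCK_SECTORS * (3 * 100 + 4 * 1) = 8 * 304 = 2432 sectors.
 */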

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = READ_ONCE(conf->log);

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = READ_ONCE(sh->raid_conf->log);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, &log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io doesn't have null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, then active stripes will continue the write
	 * process. Therefore, it is not necessary to check the Faulty bit
	 * of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspend */
	wait_event(mddev->sb_wait,
		   !READ_ONCE(conf->log) ||
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

	log = READ_ONCE(conf->log);
	if (log) {
		mddev_suspend(mddev, false);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
					   REQ_OP_WRITE, GFP_NOIO, &log->bs);

	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	__bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
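
/*
 * Illustrative layout sketch (for illustration only): the 4kB meta_page
 * starts with a struct r5l_meta_block header; each call above appends one
 * struct r5l_payload_data_parity plus one __le32 checksum per page
 * described. So a data payload advances meta_offset by
 * sizeof(struct r5l_payload_data_parity) + sizeof(__le32), and a RAID6
 * parity payload (two checksums) by sizeof(struct r5l_payload_data_parity)
 * + 2 * sizeof(__le32). r5l_get_meta() rolls over to a new io_unit when
 * the next payload would not fit within PAGE_SIZE.
 */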

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else { /* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}
1090f6bed0efSShaohua Li
1091f6bed0efSShaohua Li void r5l_write_stripe_run(struct r5l_log *log)
1092f6bed0efSShaohua Li {
1093f6bed0efSShaohua Li if (!log)
1094f6bed0efSShaohua Li return;
1095f6bed0efSShaohua Li mutex_lock(&log->io_mutex);
1096f6bed0efSShaohua Li r5l_submit_current_io(log);
1097f6bed0efSShaohua Li mutex_unlock(&log->io_mutex);
1098f6bed0efSShaohua Li }
1099f6bed0efSShaohua Li
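/*
 * Handle a REQ_PREFLUSH bio. Returns 0 when the flush has been handled here
 * (the bio was completed in write-through mode, or queued as a flush barrier
 * on the current io_unit in write-back mode); returns -EAGAIN when the caller
 * should continue processing the bio through the normal write path.
 */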
1100828cbe98SShaohua Li int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1101828cbe98SShaohua Li {
11023bddb7f8SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1103828cbe98SShaohua Li /*
11043bddb7f8SSong Liu * in write through (journal only) mode,
11053bddb7f8SSong Liu * we flush the log disk cache first, then write stripe data to the
11063bddb7f8SSong Liu * raid disks. So if the bio has finished, the log disk cache has been
11073bddb7f8SSong Liu * flushed already. The recovery guarantees we can recover
11083bddb7f8SSong Liu * the bio from the log disk, so we don't need to flush again
1109828cbe98SShaohua Li */
1110828cbe98SShaohua Li if (bio->bi_iter.bi_size == 0) {
1111828cbe98SShaohua Li bio_endio(bio);
1112828cbe98SShaohua Li return 0;
1113828cbe98SShaohua Li }
11141eff9d32SJens Axboe bio->bi_opf &= ~REQ_PREFLUSH;
11153bddb7f8SSong Liu } else {
11163bddb7f8SSong Liu /* write back (with cache) */
11173bddb7f8SSong Liu if (bio->bi_iter.bi_size == 0) {
11183bddb7f8SSong Liu mutex_lock(&log->io_mutex);
11193bddb7f8SSong Liu r5l_get_meta(log, 0);
11203bddb7f8SSong Liu bio_list_add(&log->current_io->flush_barriers, bio);
11213bddb7f8SSong Liu log->current_io->has_flush = 1;
11223bddb7f8SSong Liu log->current_io->has_null_flush = 1;
11233bddb7f8SSong Liu atomic_inc(&log->current_io->pending_stripe);
11243bddb7f8SSong Liu r5l_submit_current_io(log);
11253bddb7f8SSong Liu mutex_unlock(&log->io_mutex);
11263bddb7f8SSong Liu return 0;
11273bddb7f8SSong Liu }
11283bddb7f8SSong Liu }
1129828cbe98SShaohua Li return -EAGAIN;
1130828cbe98SShaohua Li }
1131828cbe98SShaohua Li
1132f6bed0efSShaohua Li /* This will run after log space is reclaimed */
1133f6bed0efSShaohua Li static void r5l_run_no_space_stripes(struct r5l_log *log)
1134f6bed0efSShaohua Li {
1135f6bed0efSShaohua Li struct stripe_head *sh;
1136f6bed0efSShaohua Li
1137f6bed0efSShaohua Li spin_lock(&log->no_space_stripes_lock);
1138f6bed0efSShaohua Li while (!list_empty(&log->no_space_stripes)) {
1139f6bed0efSShaohua Li sh = list_first_entry(&log->no_space_stripes,
1140f6bed0efSShaohua Li struct stripe_head, log_list);
1141f6bed0efSShaohua Li list_del_init(&sh->log_list);
1142f6bed0efSShaohua Li set_bit(STRIPE_HANDLE, &sh->state);
1143f6bed0efSShaohua Li raid5_release_stripe(sh);
1144f6bed0efSShaohua Li }
1145f6bed0efSShaohua Li spin_unlock(&log->no_space_stripes_lock);
1146f6bed0efSShaohua Li }
1147f6bed0efSShaohua Li
1148a39f7afdSSong Liu /*
1149a39f7afdSSong Liu * calculate new last_checkpoint
1150a39f7afdSSong Liu * for write through mode, returns log->next_checkpoint
1151a39f7afdSSong Liu * for write back, returns log_start of the first sh in stripe_in_journal_list
1152a39f7afdSSong Liu */
1153a39f7afdSSong Liu static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1154a39f7afdSSong Liu {
1155a39f7afdSSong Liu struct stripe_head *sh;
115606a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1157a39f7afdSSong Liu sector_t new_cp;
1158a39f7afdSSong Liu unsigned long flags;
1159a39f7afdSSong Liu
1160a39f7afdSSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1161a39f7afdSSong Liu return log->next_checkpoint;
1162a39f7afdSSong Liu
1163a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
116406a4d0d8SYu Kuai if (list_empty(&log->stripe_in_journal_list)) {
1165a39f7afdSSong Liu /* all stripes flushed */
1166d3014e21SDan Carpenter spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1167a39f7afdSSong Liu return log->next_checkpoint;
1168a39f7afdSSong Liu }
116906a4d0d8SYu Kuai sh = list_first_entry(&log->stripe_in_journal_list,
1170a39f7afdSSong Liu struct stripe_head, r5c);
1171a39f7afdSSong Liu new_cp = sh->log_start;
1172a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1173a39f7afdSSong Liu return new_cp;
1174a39f7afdSSong Liu }
1175a39f7afdSSong Liu
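/*
 * Space between last_checkpoint and the new checkpoint is reclaimable;
 * r5l_ring_distance() accounts for wrap-around on the circular log.
 */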
117617036461SChristoph Hellwig static sector_t r5l_reclaimable_space(struct r5l_log *log)
117717036461SChristoph Hellwig {
1178a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
1179a39f7afdSSong Liu
118017036461SChristoph Hellwig return r5l_ring_distance(log, log->last_checkpoint,
1181a39f7afdSSong Liu r5c_calculate_new_cp(conf));
118217036461SChristoph Hellwig }
118317036461SChristoph Hellwig
11845036c390SChristoph Hellwig static void r5l_run_no_mem_stripe(struct r5l_log *log)
11855036c390SChristoph Hellwig {
11865036c390SChristoph Hellwig struct stripe_head *sh;
11875036c390SChristoph Hellwig
1188efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock);
11895036c390SChristoph Hellwig
11905036c390SChristoph Hellwig if (!list_empty(&log->no_mem_stripes)) {
11915036c390SChristoph Hellwig sh = list_first_entry(&log->no_mem_stripes,
11925036c390SChristoph Hellwig struct stripe_head, log_list);
11935036c390SChristoph Hellwig list_del_init(&sh->log_list);
11945036c390SChristoph Hellwig set_bit(STRIPE_HANDLE, &sh->state);
11955036c390SChristoph Hellwig raid5_release_stripe(sh);
11965036c390SChristoph Hellwig }
11975036c390SChristoph Hellwig }
11985036c390SChristoph Hellwig
119904732f74SChristoph Hellwig static bool r5l_complete_finished_ios(struct r5l_log *log)
120017036461SChristoph Hellwig {
120117036461SChristoph Hellwig struct r5l_io_unit *io, *next;
120217036461SChristoph Hellwig bool found = false;
120317036461SChristoph Hellwig
1204efa4b77bSShaohua Li lockdep_assert_held(&log->io_list_lock);
120517036461SChristoph Hellwig
120604732f74SChristoph Hellwig list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
120717036461SChristoph Hellwig /* don't change list order */
120817036461SChristoph Hellwig if (io->state < IO_UNIT_STRIPE_END)
120917036461SChristoph Hellwig break;
121017036461SChristoph Hellwig
121117036461SChristoph Hellwig log->next_checkpoint = io->log_start;
121217036461SChristoph Hellwig
121317036461SChristoph Hellwig list_del(&io->log_sibling);
1214afeee514SKent Overstreet mempool_free(io, &log->io_pool);
12155036c390SChristoph Hellwig r5l_run_no_mem_stripe(log);
121617036461SChristoph Hellwig
121717036461SChristoph Hellwig found = true;
121817036461SChristoph Hellwig }
121917036461SChristoph Hellwig
122017036461SChristoph Hellwig return found;
122117036461SChristoph Hellwig }
122217036461SChristoph Hellwig
1223509ffec7SChristoph Hellwig static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1224509ffec7SChristoph Hellwig {
1225509ffec7SChristoph Hellwig struct r5l_log *log = io->log;
1226a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
1227509ffec7SChristoph Hellwig unsigned long flags;
1228509ffec7SChristoph Hellwig
1229509ffec7SChristoph Hellwig spin_lock_irqsave(&log->io_list_lock, flags);
1230509ffec7SChristoph Hellwig __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
123117036461SChristoph Hellwig
123204732f74SChristoph Hellwig if (!r5l_complete_finished_ios(log)) {
123385f2f9a4SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags);
123485f2f9a4SShaohua Li return;
123585f2f9a4SShaohua Li }
1236509ffec7SChristoph Hellwig
1237a39f7afdSSong Liu if (r5l_reclaimable_space(log) > log->max_free_space ||
1238a39f7afdSSong Liu test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1239509ffec7SChristoph Hellwig r5l_wake_reclaim(log, 0);
1240509ffec7SChristoph Hellwig
1241509ffec7SChristoph Hellwig spin_unlock_irqrestore(&log->io_list_lock, flags);
1242509ffec7SChristoph Hellwig wake_up(&log->iounit_wait);
1243509ffec7SChristoph Hellwig }
1244509ffec7SChristoph Hellwig
12450576b1c6SShaohua Li void r5l_stripe_write_finished(struct stripe_head *sh)
12460576b1c6SShaohua Li {
12470576b1c6SShaohua Li struct r5l_io_unit *io;
12480576b1c6SShaohua Li
12490576b1c6SShaohua Li io = sh->log_io;
12500576b1c6SShaohua Li sh->log_io = NULL;
12510576b1c6SShaohua Li
1252509ffec7SChristoph Hellwig if (io && atomic_dec_and_test(&io->pending_stripe))
1253509ffec7SChristoph Hellwig __r5l_stripe_write_finished(io);
12540576b1c6SShaohua Li }
12550576b1c6SShaohua Li
1256a8c34f91SShaohua Li static void r5l_log_flush_endio(struct bio *bio)
1257a8c34f91SShaohua Li {
1258a8c34f91SShaohua Li struct r5l_log *log = container_of(bio, struct r5l_log,
1259a8c34f91SShaohua Li flush_bio);
1260a8c34f91SShaohua Li unsigned long flags;
1261a8c34f91SShaohua Li struct r5l_io_unit *io;
1262a8c34f91SShaohua Li
12634e4cbee9SChristoph Hellwig if (bio->bi_status)
12646e74a9cfSShaohua Li md_error(log->rdev->mddev, log->rdev);
12650d0bd28cSYu Kuai bio_uninit(bio);
12666e74a9cfSShaohua Li
1267a8c34f91SShaohua Li spin_lock_irqsave(&log->io_list_lock, flags);
1268d8858f43SChristoph Hellwig list_for_each_entry(io, &log->flushing_ios, log_sibling)
1269d8858f43SChristoph Hellwig r5l_io_run_stripes(io);
127004732f74SChristoph Hellwig list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1271a8c34f91SShaohua Li spin_unlock_irqrestore(&log->io_list_lock, flags);
1272a8c34f91SShaohua Li }
1273a8c34f91SShaohua Li
12740576b1c6SShaohua Li /*
12750576b1c6SShaohua Li * Start dispatching IO to the raid disks.
12760576b1c6SShaohua Li * The log consists of io_units, each led by a meta block. A broken meta in
12770576b1c6SShaohua Li * the middle of the log prevents recovery from finding the meta at the head
12780576b1c6SShaohua Li * of the log. So if an operation requires the meta at the head to be
12790576b1c6SShaohua Li * persistent in the log, the meta before it must be persistent too. A case is:
12800576b1c6SShaohua Li *
12810576b1c6SShaohua Li * stripe data/parity is in the log, and we start writing the stripe to the
12820576b1c6SShaohua Li * raid disks. The data/parity must be persistent in the log before that write.
12830576b1c6SShaohua Li *
12840576b1c6SShaohua Li * The solution is to strictly maintain io_unit list order. In this case, we
12850576b1c6SShaohua Li * only write stripes of an io_unit to the raid disks once it is the first
12860576b1c6SShaohua Li * io_unit in the list whose data/parity is in the log.
12870576b1c6SShaohua Li */
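/*
 * For example: with io_units 1, 2 and 3 submitted in that order, even if 2
 * and 3 are already persistent, their stripes must not be written to the
 * raid disks before 1 is persistent, since recovery cannot scan past a
 * broken meta block of 1.
 */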
12880576b1c6SShaohua Li void r5l_flush_stripe_to_raid(struct r5l_log *log)
12890576b1c6SShaohua Li {
1290a8c34f91SShaohua Li bool do_flush;
129156fef7c6SChristoph Hellwig
129256fef7c6SChristoph Hellwig if (!log || !log->need_cache_flush)
12930576b1c6SShaohua Li return;
12940576b1c6SShaohua Li
1295a8c34f91SShaohua Li spin_lock_irq(&log->io_list_lock);
1296a8c34f91SShaohua Li /* flush bio is running */
1297a8c34f91SShaohua Li if (!list_empty(&log->flushing_ios)) {
1298a8c34f91SShaohua Li spin_unlock_irq(&log->io_list_lock);
12990576b1c6SShaohua Li return;
13000576b1c6SShaohua Li }
1301a8c34f91SShaohua Li list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1302a8c34f91SShaohua Li do_flush = !list_empty(&log->flushing_ios);
13030576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock);
1304a8c34f91SShaohua Li
1305a8c34f91SShaohua Li if (!do_flush)
1306a8c34f91SShaohua Li return;
13070dd00cbaSChristoph Hellwig bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1308a7c50c94SChristoph Hellwig REQ_OP_WRITE | REQ_PREFLUSH);
1309a8c34f91SShaohua Li log->flush_bio.bi_end_io = r5l_log_flush_endio;
13104e49ea4aSMike Christie submit_bio(&log->flush_bio);
13110576b1c6SShaohua Li }
13120576b1c6SShaohua Li
13130576b1c6SShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp);
13144b482044SShaohua Li static void r5l_write_super_and_discard_space(struct r5l_log *log,
13154b482044SShaohua Li sector_t end)
13164b482044SShaohua Li {
13174b482044SShaohua Li struct block_device *bdev = log->rdev->bdev;
13184b482044SShaohua Li struct mddev *mddev;
13194b482044SShaohua Li
13204b482044SShaohua Li r5l_write_super(log, end);
13214b482044SShaohua Li
132270200574SChristoph Hellwig if (!bdev_max_discard_sectors(bdev))
13234b482044SShaohua Li return;
13244b482044SShaohua Li
13254b482044SShaohua Li mddev = log->rdev->mddev;
13264b482044SShaohua Li /*
13278e018c21SShaohua Li * Discard could zero data, so before discard we must make sure
13288e018c21SShaohua Li * the superblock is updated to the new log tail. Updating the superblock
13298e018c21SShaohua Li * (either by calling md_update_sb() directly or by depending on the md
13308e018c21SShaohua Li * thread) must hold the reconfig mutex. On the other hand, raid5_quiesce
133112ba6676SXU pengfei * is called with reconfig_mutex held. The first step of raid5_quiesce()
133212ba6676SXU pengfei * is waiting for all IO to finish, hence waiting for the reclaim thread,
133312ba6676SXU pengfei * while the reclaim thread is calling this function and waiting for the
13348e018c21SShaohua Li * reconfig mutex. So there is a deadlock. We work around this with a trylock.
13358e018c21SShaohua Li * FIXME: we could miss discard if we can't take reconfig mutex
13364b482044SShaohua Li */
13372953079cSShaohua Li set_mask_bits(&mddev->sb_flags, 0,
13382953079cSShaohua Li BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
13398e018c21SShaohua Li if (!mddev_trylock(mddev))
13408e018c21SShaohua Li return;
13414b482044SShaohua Li md_update_sb(mddev, 1);
13428e018c21SShaohua Li mddev_unlock(mddev);
13434b482044SShaohua Li
13446e74a9cfSShaohua Li /* discard IO error really doesn't matter, ignore it */
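/*
 * The log is a ring: when the freed region wraps past the end of the
 * device (last_checkpoint >= end), it must be discarded as two ranges,
 * [last_checkpoint, device_size) and [0, end).
 */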
13454b482044SShaohua Li if (log->last_checkpoint < end) {
13464b482044SShaohua Li blkdev_issue_discard(bdev,
13474b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
134844abff2cSChristoph Hellwig end - log->last_checkpoint, GFP_NOIO);
13494b482044SShaohua Li } else {
13504b482044SShaohua Li blkdev_issue_discard(bdev,
13514b482044SShaohua Li log->last_checkpoint + log->rdev->data_offset,
13524b482044SShaohua Li log->device_size - log->last_checkpoint,
135344abff2cSChristoph Hellwig GFP_NOIO);
13544b482044SShaohua Li blkdev_issue_discard(bdev, log->rdev->data_offset, end,
135544abff2cSChristoph Hellwig GFP_NOIO);
13564b482044SShaohua Li }
13574b482044SShaohua Li }
13584b482044SShaohua Li
1359a39f7afdSSong Liu /*
1360a39f7afdSSong Liu * r5c_flush_stripe moves a stripe from the cached list to handle_list. When called,
1361a39f7afdSSong Liu * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1362a39f7afdSSong Liu *
1363a39f7afdSSong Liu * must hold conf->device_lock
1364a39f7afdSSong Liu */
1365a39f7afdSSong Liu static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1366a39f7afdSSong Liu {
1367a39f7afdSSong Liu BUG_ON(list_empty(&sh->lru));
1368a39f7afdSSong Liu BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1369a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1370a39f7afdSSong Liu
1371a39f7afdSSong Liu /*
1372a39f7afdSSong Liu * The stripe is not ON_RELEASE_LIST, so it is safe to call
1373a39f7afdSSong Liu * raid5_release_stripe() while holding conf->device_lock
1374a39f7afdSSong Liu */
1375a39f7afdSSong Liu BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1376efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
1377a39f7afdSSong Liu
1378a39f7afdSSong Liu list_del_init(&sh->lru);
1379a39f7afdSSong Liu atomic_inc(&sh->count);
1380a39f7afdSSong Liu
1381a39f7afdSSong Liu set_bit(STRIPE_HANDLE, &sh->state);
1382a39f7afdSSong Liu atomic_inc(&conf->active_stripes);
1383a39f7afdSSong Liu r5c_make_stripe_write_out(sh);
1384a39f7afdSSong Liu
1385e33fbb9cSShaohua Li if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1386e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_partial_stripes);
1387e33fbb9cSShaohua Li else
1388e33fbb9cSShaohua Li atomic_inc(&conf->r5c_flushing_full_stripes);
1389a39f7afdSSong Liu raid5_release_stripe(sh);
1390a39f7afdSSong Liu }
1391a39f7afdSSong Liu
1392a39f7afdSSong Liu /*
1393a39f7afdSSong Liu * if num == 0, flush all full stripes
1394a39f7afdSSong Liu * if num > 0, flush all full stripes. If fewer than num full stripes are
1395a39f7afdSSong Liu * flushed, flush some partial stripes until num stripes in total are
1396a39f7afdSSong Liu * flushed or there are no more cached stripes.
1397a39f7afdSSong Liu */
1398a39f7afdSSong Liu void r5c_flush_cache(struct r5conf *conf, int num)
1399a39f7afdSSong Liu {
1400a39f7afdSSong Liu int count;
1401a39f7afdSSong Liu struct stripe_head *sh, *next;
1402a39f7afdSSong Liu
1403efa4b77bSShaohua Li lockdep_assert_held(&conf->device_lock);
140406a4d0d8SYu Kuai if (!READ_ONCE(conf->log))
1405a39f7afdSSong Liu return;
1406a39f7afdSSong Liu
1407a39f7afdSSong Liu count = 0;
1408a39f7afdSSong Liu list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1409a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1410a39f7afdSSong Liu count++;
1411a39f7afdSSong Liu }
1412a39f7afdSSong Liu
1413a39f7afdSSong Liu if (count >= num)
1414a39f7afdSSong Liu return;
1415a39f7afdSSong Liu list_for_each_entry_safe(sh, next,
1416a39f7afdSSong Liu &conf->r5c_partial_stripe_list, lru) {
1417a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1418a39f7afdSSong Liu if (++count >= num)
1419a39f7afdSSong Liu break;
1420a39f7afdSSong Liu }
1421a39f7afdSSong Liu }
1422a39f7afdSSong Liu
1423a39f7afdSSong Liu static void r5c_do_reclaim(struct r5conf *conf)
1424a39f7afdSSong Liu {
142506a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1426a39f7afdSSong Liu struct stripe_head *sh;
1427a39f7afdSSong Liu int count = 0;
1428a39f7afdSSong Liu unsigned long flags;
1429a39f7afdSSong Liu int total_cached;
1430a39f7afdSSong Liu int stripes_to_flush;
1431e33fbb9cSShaohua Li int flushing_partial, flushing_full;
1432a39f7afdSSong Liu
1433a39f7afdSSong Liu if (!r5c_is_writeback(log))
1434a39f7afdSSong Liu return;
1435a39f7afdSSong Liu
1436e33fbb9cSShaohua Li flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1437e33fbb9cSShaohua Li flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1438a39f7afdSSong Liu total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1439e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) -
1440e33fbb9cSShaohua Li flushing_full - flushing_partial;
1441a39f7afdSSong Liu
1442a39f7afdSSong Liu if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1443a39f7afdSSong Liu atomic_read(&conf->empty_inactive_list_nr) > 0)
1444a39f7afdSSong Liu /*
1445a39f7afdSSong Liu * if stripe cache pressure is high, flush all full stripes and
1446a39f7afdSSong Liu * some partial stripes
1447a39f7afdSSong Liu */
1448a39f7afdSSong Liu stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1449a39f7afdSSong Liu else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1450e33fbb9cSShaohua Li atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
145184890c03SShaohua Li R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1452a39f7afdSSong Liu /*
1453a39f7afdSSong Liu * if stripe cache pressure is moderate, or if there are many full
1454a39f7afdSSong Liu * stripes, flush all full stripes
1455a39f7afdSSong Liu */
1456a39f7afdSSong Liu stripes_to_flush = 0;
1457a39f7afdSSong Liu else
1458a39f7afdSSong Liu /* no need to flush */
1459a39f7afdSSong Liu stripes_to_flush = -1;
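/*
 * Example thresholds: with min_nr_stripes == 256, pressure counts as high
 * above 192 (3/4) cached-but-not-flushing stripes (or when an inactive
 * list has run empty), and as moderate above 128 (1/2).
 */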
1460a39f7afdSSong Liu
1461a39f7afdSSong Liu if (stripes_to_flush >= 0) {
1462a39f7afdSSong Liu spin_lock_irqsave(&conf->device_lock, flags);
1463a39f7afdSSong Liu r5c_flush_cache(conf, stripes_to_flush);
1464a39f7afdSSong Liu spin_unlock_irqrestore(&conf->device_lock, flags);
1465a39f7afdSSong Liu }
1466a39f7afdSSong Liu
1467a39f7afdSSong Liu /* if log space is tight, flush stripes on stripe_in_journal_list */
1468a39f7afdSSong Liu if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1469a39f7afdSSong Liu spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1470a39f7afdSSong Liu spin_lock(&conf->device_lock);
1471a39f7afdSSong Liu list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1472a39f7afdSSong Liu /*
1473a39f7afdSSong Liu * stripes on stripe_in_journal_list could be in any
1474a39f7afdSSong Liu * state of the stripe_cache state machine. In this
1475a39f7afdSSong Liu * case, we only want to flush stripes on
1476a39f7afdSSong Liu * r5c_cached_full/partial_stripes. The following
1477a39f7afdSSong Liu * condition makes sure the stripe is on one of the
1478a39f7afdSSong Liu * two lists.
1479a39f7afdSSong Liu */
1480a39f7afdSSong Liu if (!list_empty(&sh->lru) &&
1481a39f7afdSSong Liu !test_bit(STRIPE_HANDLE, &sh->state) &&
1482a39f7afdSSong Liu atomic_read(&sh->count) == 0) {
1483a39f7afdSSong Liu r5c_flush_stripe(conf, sh);
1484a39f7afdSSong Liu if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1485a39f7afdSSong Liu break;
1486a39f7afdSSong Liu }
1487e8fd52eeSShaohua Li }
1488a39f7afdSSong Liu spin_unlock(&conf->device_lock);
1489a39f7afdSSong Liu spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1490a39f7afdSSong Liu }
1491f687a33eSSong Liu
1492f687a33eSSong Liu if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1493f687a33eSSong Liu r5l_run_no_space_stripes(log);
1494f687a33eSSong Liu
1495a39f7afdSSong Liu md_wakeup_thread(conf->mddev->thread);
1496a39f7afdSSong Liu }
14974b482044SShaohua Li
14980576b1c6SShaohua Li static void r5l_do_reclaim(struct r5l_log *log)
14990576b1c6SShaohua Li {
1500a39f7afdSSong Liu struct r5conf *conf = log->rdev->mddev->private;
15010576b1c6SShaohua Li sector_t reclaim_target = xchg(&log->reclaim_target, 0);
150217036461SChristoph Hellwig sector_t reclaimable;
150317036461SChristoph Hellwig sector_t next_checkpoint;
1504a39f7afdSSong Liu bool write_super;
15050576b1c6SShaohua Li
15060576b1c6SShaohua Li spin_lock_irq(&log->io_list_lock);
1507a39f7afdSSong Liu write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1508a39f7afdSSong Liu reclaim_target != 0 || !list_empty(&log->no_space_stripes);
15090576b1c6SShaohua Li /*
15100576b1c6SShaohua Li * move proper io_units to the reclaim list. We should not change the order:
15110576b1c6SShaohua Li * reclaimable and unreclaimable io_units can be mixed in the list, and we
15120576b1c6SShaohua Li * shouldn't reuse the space of an unreclaimable io_unit.
15130576b1c6SShaohua Li */
15140576b1c6SShaohua Li while (1) {
151517036461SChristoph Hellwig reclaimable = r5l_reclaimable_space(log);
151617036461SChristoph Hellwig if (reclaimable >= reclaim_target ||
15170576b1c6SShaohua Li (list_empty(&log->running_ios) &&
15180576b1c6SShaohua Li list_empty(&log->io_end_ios) &&
1519a8c34f91SShaohua Li list_empty(&log->flushing_ios) &&
152004732f74SChristoph Hellwig list_empty(&log->finished_ios)))
15210576b1c6SShaohua Li break;
15220576b1c6SShaohua Li
152317036461SChristoph Hellwig md_wakeup_thread(log->rdev->mddev->thread);
152417036461SChristoph Hellwig wait_event_lock_irq(log->iounit_wait,
152517036461SChristoph Hellwig r5l_reclaimable_space(log) > reclaimable,
152617036461SChristoph Hellwig log->io_list_lock);
15270576b1c6SShaohua Li }
152817036461SChristoph Hellwig
1529a39f7afdSSong Liu next_checkpoint = r5c_calculate_new_cp(conf);
15300576b1c6SShaohua Li spin_unlock_irq(&log->io_list_lock);
15310576b1c6SShaohua Li
1532a39f7afdSSong Liu if (reclaimable == 0 || !write_super)
15330576b1c6SShaohua Li return;
15340576b1c6SShaohua Li
15350576b1c6SShaohua Li /*
15360576b1c6SShaohua Li * write_super will flush the cache of each raid disk. We must write super
15370576b1c6SShaohua Li * here, because the log area might be reused soon and we don't want to
15380576b1c6SShaohua Li * confuse recovery.
15390576b1c6SShaohua Li */
15404b482044SShaohua Li r5l_write_super_and_discard_space(log, next_checkpoint);
15410576b1c6SShaohua Li
15420576b1c6SShaohua Li mutex_lock(&log->io_mutex);
154317036461SChristoph Hellwig log->last_checkpoint = next_checkpoint;
1544a39f7afdSSong Liu r5c_update_log_state(log);
15450576b1c6SShaohua Li mutex_unlock(&log->io_mutex);
15460576b1c6SShaohua Li
154717036461SChristoph Hellwig r5l_run_no_space_stripes(log);
15480576b1c6SShaohua Li }
15490576b1c6SShaohua Li
15500576b1c6SShaohua Li static void r5l_reclaim_thread(struct md_thread *thread)
15510576b1c6SShaohua Li {
15520576b1c6SShaohua Li struct mddev *mddev = thread->mddev;
15530576b1c6SShaohua Li struct r5conf *conf = mddev->private;
155406a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
15550576b1c6SShaohua Li
15560576b1c6SShaohua Li if (!log)
15570576b1c6SShaohua Li return;
1558a39f7afdSSong Liu r5c_do_reclaim(conf);
15590576b1c6SShaohua Li r5l_do_reclaim(log);
15600576b1c6SShaohua Li }
15610576b1c6SShaohua Li
1562a39f7afdSSong Liu void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1563f6bed0efSShaohua Li {
15640576b1c6SShaohua Li unsigned long target;
15650576b1c6SShaohua Li unsigned long new = (unsigned long)space; /* overflow in theory */
15660576b1c6SShaohua Li
1567a39f7afdSSong Liu if (!log)
1568a39f7afdSSong Liu return;
15699487a0f6SUros Bizjak
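/*
 * The reclaim target only ever grows: a smaller request is already covered
 * by a pending larger one. Passing MaxSector (as r5l_quiesce() does) forces
 * reclaim of everything reclaimable.
 */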
15709487a0f6SUros Bizjak target = READ_ONCE(log->reclaim_target);
15710576b1c6SShaohua Li do {
15720576b1c6SShaohua Li if (new < target)
15730576b1c6SShaohua Li return;
15749487a0f6SUros Bizjak } while (!try_cmpxchg(&log->reclaim_target, &target, new));
15750576b1c6SShaohua Li md_wakeup_thread(log->reclaim_thread);
1576f6bed0efSShaohua Li }
1577f6bed0efSShaohua Li
1578b03e0ccbSNeilBrown void r5l_quiesce(struct r5l_log *log, int quiesce)
1579e6c033f7SShaohua Li {
158044693154SYu Kuai struct mddev *mddev = log->rdev->mddev;
158144693154SYu Kuai struct md_thread *thread = rcu_dereference_protected(
158244693154SYu Kuai log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1583b03e0ccbSNeilBrown
1584b03e0ccbSNeilBrown if (quiesce) {
15854b482044SShaohua Li /* make sure r5l_write_super_and_discard_space exits */
15864b482044SShaohua Li wake_up(&mddev->sb_wait);
158744693154SYu Kuai kthread_park(thread->tsk);
1588a39f7afdSSong Liu r5l_wake_reclaim(log, MaxSector);
1589e6c033f7SShaohua Li r5l_do_reclaim(log);
1590b03e0ccbSNeilBrown } else
159144693154SYu Kuai kthread_unpark(thread->tsk);
1592e6c033f7SShaohua Li }
1593e6c033f7SShaohua Li
15946e74a9cfSShaohua Li bool r5l_log_disk_error(struct r5conf *conf)
15956e74a9cfSShaohua Li {
159606a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
1597f6b6ec5cSShaohua Li
15987769085cSLogan Gunthorpe /* don't allow write if journal disk is missing */
1599f6b6ec5cSShaohua Li if (!log)
16007769085cSLogan Gunthorpe return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1601f6b6ec5cSShaohua Li else
16027769085cSLogan Gunthorpe return test_bit(Faulty, &log->rdev->flags);
16036e74a9cfSShaohua Li }
16046e74a9cfSShaohua Li
1605effe6ee7SSong Liu #define R5L_RECOVERY_PAGE_POOL_SIZE 256
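/* with 4K pages, one pool fetch reads up to 256 pages == 1MB of the log */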
1606effe6ee7SSong Liu
1607355810d1SShaohua Li struct r5l_recovery_ctx {
1608355810d1SShaohua Li struct page *meta_page; /* current meta */
1609355810d1SShaohua Li sector_t meta_total_blocks; /* total size of current meta and data */
1610355810d1SShaohua Li sector_t pos; /* recovery position */
1611355810d1SShaohua Li u64 seq; /* recovery position seq */
1612b4c625c6SSong Liu int data_parity_stripes; /* number of data_parity stripes */
1613b4c625c6SSong Liu int data_only_stripes; /* number of data_only stripes */
1614b4c625c6SSong Liu struct list_head cached_list;
1615effe6ee7SSong Liu
1616effe6ee7SSong Liu /*
1617effe6ee7SSong Liu * read ahead page pool (ra_pool)
1618effe6ee7SSong Liu * in recovery, the log is read sequentially. It is not efficient to
1619effe6ee7SSong Liu * read every page with sync_page_io(). The read ahead page pool
1620effe6ee7SSong Liu * reads multiple pages with one IO, so further log reads can
1621effe6ee7SSong Liu * just copy data from the pool.
1622effe6ee7SSong Liu */
1623effe6ee7SSong Liu struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
162489f94b64SChristoph Hellwig struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1625effe6ee7SSong Liu sector_t pool_offset; /* offset of first page in the pool */
1626effe6ee7SSong Liu int total_pages; /* total allocated pages */
1627effe6ee7SSong Liu int valid_pages; /* pages with valid data */
1628355810d1SShaohua Li };
1629355810d1SShaohua Li
1630effe6ee7SSong Liu static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1631effe6ee7SSong Liu struct r5l_recovery_ctx *ctx)
1632effe6ee7SSong Liu {
1633effe6ee7SSong Liu struct page *page;
1634effe6ee7SSong Liu
1635effe6ee7SSong Liu ctx->valid_pages = 0;
1636effe6ee7SSong Liu ctx->total_pages = 0;
1637effe6ee7SSong Liu while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1638effe6ee7SSong Liu page = alloc_page(GFP_KERNEL);
1639effe6ee7SSong Liu
1640effe6ee7SSong Liu if (!page)
1641effe6ee7SSong Liu break;
1642effe6ee7SSong Liu ctx->ra_pool[ctx->total_pages] = page;
1643effe6ee7SSong Liu ctx->total_pages += 1;
1644effe6ee7SSong Liu }
1645effe6ee7SSong Liu
164689f94b64SChristoph Hellwig if (ctx->total_pages == 0)
1647effe6ee7SSong Liu return -ENOMEM;
1648effe6ee7SSong Liu
1649effe6ee7SSong Liu ctx->pool_offset = 0;
1650effe6ee7SSong Liu return 0;
1651effe6ee7SSong Liu }
1652effe6ee7SSong Liu
1653effe6ee7SSong Liu static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1654effe6ee7SSong Liu struct r5l_recovery_ctx *ctx)
1655effe6ee7SSong Liu {
1656effe6ee7SSong Liu int i;
1657effe6ee7SSong Liu
1658effe6ee7SSong Liu for (i = 0; i < ctx->total_pages; ++i)
1659effe6ee7SSong Liu put_page(ctx->ra_pool[i]);
1660effe6ee7SSong Liu }
1661effe6ee7SSong Liu
1662effe6ee7SSong Liu /*
1663effe6ee7SSong Liu * fetch up to ctx->total_pages pages starting at offset
1664effe6ee7SSong Liu * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1665effe6ee7SSong Liu * However, if the offset is close to the end of the journal device,
1666effe6ee7SSong Liu * ctx->valid_pages could be smaller than ctx->total_pages
1667effe6ee7SSong Liu */
1668effe6ee7SSong Liu static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1669effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1670effe6ee7SSong Liu sector_t offset)
1671effe6ee7SSong Liu {
167289f94b64SChristoph Hellwig struct bio bio;
167389f94b64SChristoph Hellwig int ret;
167489f94b64SChristoph Hellwig
167589f94b64SChristoph Hellwig bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
167689f94b64SChristoph Hellwig R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
167789f94b64SChristoph Hellwig bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1678effe6ee7SSong Liu
1679effe6ee7SSong Liu ctx->valid_pages = 0;
1680effe6ee7SSong Liu ctx->pool_offset = offset;
1681effe6ee7SSong Liu
1682effe6ee7SSong Liu while (ctx->valid_pages < ctx->total_pages) {
168389f94b64SChristoph Hellwig __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
168489f94b64SChristoph Hellwig 0);
1685effe6ee7SSong Liu ctx->valid_pages += 1;
1686effe6ee7SSong Liu
1687effe6ee7SSong Liu offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1688effe6ee7SSong Liu
1689effe6ee7SSong Liu if (offset == 0) /* reached end of the device */
1690effe6ee7SSong Liu break;
1691effe6ee7SSong Liu }
1692effe6ee7SSong Liu
169389f94b64SChristoph Hellwig ret = submit_bio_wait(&bio);
169489f94b64SChristoph Hellwig bio_uninit(&bio);
169589f94b64SChristoph Hellwig return ret;
1696effe6ee7SSong Liu }
1697effe6ee7SSong Liu
1698effe6ee7SSong Liu /*
1699effe6ee7SSong Liu * try to read a page from the read ahead page pool; if the page is not in the
1700effe6ee7SSong Liu * pool, call r5l_recovery_fetch_ra_pool
1701effe6ee7SSong Liu */
1702effe6ee7SSong Liu static int r5l_recovery_read_page(struct r5l_log *log,
1703effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1704effe6ee7SSong Liu struct page *page,
1705effe6ee7SSong Liu sector_t offset)
1706effe6ee7SSong Liu {
1707effe6ee7SSong Liu int ret;
1708effe6ee7SSong Liu
1709effe6ee7SSong Liu if (offset < ctx->pool_offset ||
1710effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1711effe6ee7SSong Liu ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1712effe6ee7SSong Liu if (ret)
1713effe6ee7SSong Liu return ret;
1714effe6ee7SSong Liu }
1715effe6ee7SSong Liu
1716effe6ee7SSong Liu BUG_ON(offset < ctx->pool_offset ||
1717effe6ee7SSong Liu offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1718effe6ee7SSong Liu
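/*
 * Pool hit: index the pool by block offset, e.g. an offset 24 sectors
 * past pool_offset selects ra_pool[24 >> BLOCK_SECTOR_SHIFT] == page 3.
 */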
1719effe6ee7SSong Liu memcpy(page_address(page),
1720effe6ee7SSong Liu page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1721effe6ee7SSong Liu BLOCK_SECTOR_SHIFT]),
1722effe6ee7SSong Liu PAGE_SIZE);
1723effe6ee7SSong Liu return 0;
1724effe6ee7SSong Liu }
1725effe6ee7SSong Liu
17269ed988f5SSong Liu static int r5l_recovery_read_meta_block(struct r5l_log *log,
1727355810d1SShaohua Li struct r5l_recovery_ctx *ctx)
1728355810d1SShaohua Li {
1729355810d1SShaohua Li struct page *page = ctx->meta_page;
1730355810d1SShaohua Li struct r5l_meta_block *mb;
1731355810d1SShaohua Li u32 crc, stored_crc;
1732effe6ee7SSong Liu int ret;
1733355810d1SShaohua Li
1734effe6ee7SSong Liu ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1735effe6ee7SSong Liu if (ret != 0)
1736effe6ee7SSong Liu return ret;
1737355810d1SShaohua Li
1738355810d1SShaohua Li mb = page_address(page);
1739355810d1SShaohua Li stored_crc = le32_to_cpu(mb->checksum);
1740355810d1SShaohua Li mb->checksum = 0;
1741355810d1SShaohua Li
1742355810d1SShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1743355810d1SShaohua Li le64_to_cpu(mb->seq) != ctx->seq ||
1744355810d1SShaohua Li mb->version != R5LOG_VERSION ||
1745355810d1SShaohua Li le64_to_cpu(mb->position) != ctx->pos)
1746355810d1SShaohua Li return -EINVAL;
1747355810d1SShaohua Li
17485cb2fbd6SShaohua Li crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1749355810d1SShaohua Li if (stored_crc != crc)
1750355810d1SShaohua Li return -EINVAL;
1751355810d1SShaohua Li
1752355810d1SShaohua Li if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1753355810d1SShaohua Li return -EINVAL;
1754355810d1SShaohua Li
1755355810d1SShaohua Li ctx->meta_total_blocks = BLOCK_SECTORS;
1756355810d1SShaohua Li
1757355810d1SShaohua Li return 0;
1758355810d1SShaohua Li }
1759355810d1SShaohua Li
17609ed988f5SSong Liu static void
17619ed988f5SSong Liu r5l_recovery_create_empty_meta_block(struct r5l_log *log,
17629ed988f5SSong Liu struct page *page,
17639ed988f5SSong Liu sector_t pos, u64 seq)
1764355810d1SShaohua Li {
1765355810d1SShaohua Li struct r5l_meta_block *mb;
1766355810d1SShaohua Li
1767355810d1SShaohua Li mb = page_address(page);
17689ed988f5SSong Liu clear_page(mb);
1769355810d1SShaohua Li mb->magic = cpu_to_le32(R5LOG_MAGIC);
1770355810d1SShaohua Li mb->version = R5LOG_VERSION;
1771355810d1SShaohua Li mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1772355810d1SShaohua Li mb->seq = cpu_to_le64(seq);
1773355810d1SShaohua Li mb->position = cpu_to_le64(pos);
1774355810d1SShaohua Li }
1775355810d1SShaohua Li
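/*
 * Write an empty meta block at (pos, seq) with FUA. Recovery accepts it as
 * a valid, payload-free tail of the log; scanning then stops at the next
 * block, which fails the seq/position checks.
 */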
1776355810d1SShaohua Li static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1777355810d1SShaohua Li u64 seq)
1778355810d1SShaohua Li {
1779355810d1SShaohua Li struct page *page;
1780355810d1SShaohua Li struct r5l_meta_block *mb;
1781355810d1SShaohua Li
17829ed988f5SSong Liu page = alloc_page(GFP_KERNEL);
1783355810d1SShaohua Li if (!page)
1784355810d1SShaohua Li return -ENOMEM;
17859ed988f5SSong Liu r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1786355810d1SShaohua Li mb = page_address(page);
17875c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
17885c88f403SSong Liu mb, PAGE_SIZE));
17894ce4c73fSBart Van Assche if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
17905a8948f8SJan Kara REQ_SYNC | REQ_FUA, false)) {
1791355810d1SShaohua Li __free_page(page);
1792355810d1SShaohua Li return -EIO;
1793355810d1SShaohua Li }
1794355810d1SShaohua Li __free_page(page);
1795355810d1SShaohua Li return 0;
1796355810d1SShaohua Li }
1797355810d1SShaohua Li
1798b4c625c6SSong Liu /*
1799b4c625c6SSong Liu * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
1800b4c625c6SSong Liu * to mark valid (potentially not flushed) data in the journal.
1801b4c625c6SSong Liu *
1802b4c625c6SSong Liu * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1803b4c625c6SSong Liu * so there should not be any mismatch here.
1804b4c625c6SSong Liu */
1805b4c625c6SSong Liu static void r5l_recovery_load_data(struct r5l_log *log,
1806b4c625c6SSong Liu struct stripe_head *sh,
1807b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
1808b4c625c6SSong Liu struct r5l_payload_data_parity *payload,
1809b4c625c6SSong Liu sector_t log_offset)
1810f6bed0efSShaohua Li {
1811b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
1812b4c625c6SSong Liu struct r5conf *conf = mddev->private;
1813b4c625c6SSong Liu int dd_idx;
1814355810d1SShaohua Li
1815b4c625c6SSong Liu raid5_compute_sector(conf,
1816b4c625c6SSong Liu le64_to_cpu(payload->location), 0,
1817b4c625c6SSong Liu &dd_idx, sh);
1818effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1819b4c625c6SSong Liu sh->dev[dd_idx].log_checksum =
1820b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]);
1821b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS;
1822b4c625c6SSong Liu
1823b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1824b4c625c6SSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state);
1825b4c625c6SSong Liu }
1826b4c625c6SSong Liu
1827b4c625c6SSong Liu static void r5l_recovery_load_parity(struct r5l_log *log,
1828b4c625c6SSong Liu struct stripe_head *sh,
1829b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
1830b4c625c6SSong Liu struct r5l_payload_data_parity *payload,
1831b4c625c6SSong Liu sector_t log_offset)
1832b4c625c6SSong Liu {
1833b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
1834b4c625c6SSong Liu struct r5conf *conf = mddev->private;
1835b4c625c6SSong Liu
1836b4c625c6SSong Liu ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1837effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1838b4c625c6SSong Liu sh->dev[sh->pd_idx].log_checksum =
1839b4c625c6SSong Liu le32_to_cpu(payload->checksum[0]);
1840b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1841b4c625c6SSong Liu
1842b4c625c6SSong Liu if (sh->qd_idx >= 0) {
1843effe6ee7SSong Liu r5l_recovery_read_page(
1844effe6ee7SSong Liu log, ctx, sh->dev[sh->qd_idx].page,
1845effe6ee7SSong Liu r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1846b4c625c6SSong Liu sh->dev[sh->qd_idx].log_checksum =
1847b4c625c6SSong Liu le32_to_cpu(payload->checksum[1]);
1848b4c625c6SSong Liu set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1849b4c625c6SSong Liu }
1850b4c625c6SSong Liu clear_bit(STRIPE_R5C_CACHING, &sh->state);
1851b4c625c6SSong Liu }
1852b4c625c6SSong Liu
1853b4c625c6SSong Liu static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1854b4c625c6SSong Liu {
1855b4c625c6SSong Liu int i;
1856b4c625c6SSong Liu
1857b4c625c6SSong Liu sh->state = 0;
1858b4c625c6SSong Liu sh->log_start = MaxSector;
1859b4c625c6SSong Liu for (i = sh->disks; i--; )
1860b4c625c6SSong Liu sh->dev[i].flags = 0;
1861b4c625c6SSong Liu }
1862b4c625c6SSong Liu
1863b4c625c6SSong Liu static void
1864b4c625c6SSong Liu r5l_recovery_replay_one_stripe(struct r5conf *conf,
1865b4c625c6SSong Liu struct stripe_head *sh,
1866b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1867b4c625c6SSong Liu {
1868b4c625c6SSong Liu struct md_rdev *rdev, *rrdev;
1869b4c625c6SSong Liu int disk_index;
1870b4c625c6SSong Liu int data_count = 0;
1871b4c625c6SSong Liu
1872b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1873b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1874b4c625c6SSong Liu continue;
1875b4c625c6SSong Liu if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1876b4c625c6SSong Liu continue;
1877b4c625c6SSong Liu data_count++;
1878b4c625c6SSong Liu }
1879b4c625c6SSong Liu
1880b4c625c6SSong Liu /*
1881b4c625c6SSong Liu * stripes that only have parity must have been flushed
1882b4c625c6SSong Liu * before the crash that we are now recovering from, so
1883b4c625c6SSong Liu * there is nothing more to recover.
1884b4c625c6SSong Liu */
1885b4c625c6SSong Liu if (data_count == 0)
1886b4c625c6SSong Liu goto out;
1887b4c625c6SSong Liu
1888b4c625c6SSong Liu for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1889b4c625c6SSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1890b4c625c6SSong Liu continue;
1891b4c625c6SSong Liu
1892b4c625c6SSong Liu /* in case device is broken */
1893*ad860670SYu Kuai rdev = conf->disks[disk_index].rdev;
1894b4c625c6SSong Liu if (rdev) {
1895b4c625c6SSong Liu atomic_inc(&rdev->nr_pending);
1896b4c625c6SSong Liu sync_page_io(rdev, sh->sector, PAGE_SIZE,
18974ce4c73fSBart Van Assche sh->dev[disk_index].page, REQ_OP_WRITE,
1898b4c625c6SSong Liu false);
1899b4c625c6SSong Liu rdev_dec_pending(rdev, rdev->mddev);
1900b4c625c6SSong Liu }
1901*ad860670SYu Kuai rrdev = conf->disks[disk_index].replacement;
1902b4c625c6SSong Liu if (rrdev) {
1903b4c625c6SSong Liu atomic_inc(&rrdev->nr_pending);
1904b4c625c6SSong Liu sync_page_io(rrdev, sh->sector, PAGE_SIZE,
19054ce4c73fSBart Van Assche sh->dev[disk_index].page, REQ_OP_WRITE,
1906b4c625c6SSong Liu false);
1907b4c625c6SSong Liu rdev_dec_pending(rrdev, rrdev->mddev);
1908b4c625c6SSong Liu }
1909b4c625c6SSong Liu }
1910b4c625c6SSong Liu ctx->data_parity_stripes++;
1911b4c625c6SSong Liu out:
1912b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1913b4c625c6SSong Liu }
1914b4c625c6SSong Liu
1915b4c625c6SSong Liu static struct stripe_head *
1916483cbbedSAlexei Naberezhnov r5c_recovery_alloc_stripe(
1917483cbbedSAlexei Naberezhnov struct r5conf *conf,
1918483cbbedSAlexei Naberezhnov sector_t stripe_sect,
1919483cbbedSAlexei Naberezhnov int noblock)
1920b4c625c6SSong Liu {
1921b4c625c6SSong Liu struct stripe_head *sh;
1922b4c625c6SSong Liu
19232f2d51efSLogan Gunthorpe sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
19242f2d51efSLogan Gunthorpe noblock ? R5_GAS_NOBLOCK : 0);
1925b4c625c6SSong Liu if (!sh)
1926b4c625c6SSong Liu return NULL; /* no more stripes available */
1927b4c625c6SSong Liu
1928b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1929b4c625c6SSong Liu
1930b4c625c6SSong Liu return sh;
1931b4c625c6SSong Liu }
1932b4c625c6SSong Liu
1933b4c625c6SSong Liu static struct stripe_head *
1934b4c625c6SSong Liu r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1935b4c625c6SSong Liu {
1936b4c625c6SSong Liu struct stripe_head *sh;
1937b4c625c6SSong Liu
1938b4c625c6SSong Liu list_for_each_entry(sh, list, lru)
1939b4c625c6SSong Liu if (sh->sector == sect)
1940b4c625c6SSong Liu return sh;
1941b4c625c6SSong Liu return NULL;
1942b4c625c6SSong Liu }
1943b4c625c6SSong Liu
1944b4c625c6SSong Liu static void
1945b4c625c6SSong Liu r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1946b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1947b4c625c6SSong Liu {
1948b4c625c6SSong Liu struct stripe_head *sh, *next;
1949b4c625c6SSong Liu
1950b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1951b4c625c6SSong Liu r5l_recovery_reset_stripe(sh);
1952b4c625c6SSong Liu list_del_init(&sh->lru);
1953b4c625c6SSong Liu raid5_release_stripe(sh);
1954b4c625c6SSong Liu }
1955b4c625c6SSong Liu }
1956b4c625c6SSong Liu
1957b4c625c6SSong Liu static void
1958b4c625c6SSong Liu r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1959b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1960b4c625c6SSong Liu {
1961b4c625c6SSong Liu struct stripe_head *sh, *next;
1962b4c625c6SSong Liu
1963b4c625c6SSong Liu list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1964b4c625c6SSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1965b4c625c6SSong Liu r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1966b4c625c6SSong Liu list_del_init(&sh->lru);
1967b4c625c6SSong Liu raid5_release_stripe(sh);
1968b4c625c6SSong Liu }
1969b4c625c6SSong Liu }
1970b4c625c6SSong Liu
1971b4c625c6SSong Liu /* if matches return 0; otherwise return -EINVAL */
1972b4c625c6SSong Liu static int
1973effe6ee7SSong Liu r5l_recovery_verify_data_checksum(struct r5l_log *log,
1974effe6ee7SSong Liu struct r5l_recovery_ctx *ctx,
1975effe6ee7SSong Liu struct page *page,
1976b4c625c6SSong Liu sector_t log_offset, __le32 log_checksum)
1977b4c625c6SSong Liu {
1978b4c625c6SSong Liu void *addr;
1979b4c625c6SSong Liu u32 checksum;
1980b4c625c6SSong Liu
1981effe6ee7SSong Liu r5l_recovery_read_page(log, ctx, page, log_offset);
1982b4c625c6SSong Liu addr = kmap_atomic(page);
1983b4c625c6SSong Liu checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1984b4c625c6SSong Liu kunmap_atomic(addr);
1985b4c625c6SSong Liu return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1986b4c625c6SSong Liu }
1987b4c625c6SSong Liu
1988b4c625c6SSong Liu /*
1989b4c625c6SSong Liu * before loading data to the stripe cache, we need to verify the checksum
1990b4c625c6SSong Liu * for all data; if any data page mismatches, we drop all data in the meta block
1991b4c625c6SSong Liu */
1992b4c625c6SSong Liu static int
1993b4c625c6SSong Liu r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1994b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
1995b4c625c6SSong Liu {
1996b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
1997b4c625c6SSong Liu struct r5conf *conf = mddev->private;
1998b4c625c6SSong Liu struct r5l_meta_block *mb = page_address(ctx->meta_page);
1999b4c625c6SSong Liu sector_t mb_offset = sizeof(struct r5l_meta_block);
2000b4c625c6SSong Liu sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2001b4c625c6SSong Liu struct page *page;
2002b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20032d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2004b4c625c6SSong Liu
2005b4c625c6SSong Liu page = alloc_page(GFP_KERNEL);
2006b4c625c6SSong Liu if (!page)
2007355810d1SShaohua Li return -ENOMEM;
2008355810d1SShaohua Li
2009b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2010b4c625c6SSong Liu payload = (void *)mb + mb_offset;
20112d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
2012b4c625c6SSong Liu
20131ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2014b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2015effe6ee7SSong Liu log, ctx, page, log_offset,
2016b4c625c6SSong Liu payload->checksum[0]) < 0)
2017b4c625c6SSong Liu goto mismatch;
20181ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2019b4c625c6SSong Liu if (r5l_recovery_verify_data_checksum(
2020effe6ee7SSong Liu log, ctx, page, log_offset,
2021b4c625c6SSong Liu payload->checksum[0]) < 0)
2022b4c625c6SSong Liu goto mismatch;
2023b4c625c6SSong Liu if (conf->max_degraded == 2 && /* q for RAID 6 */
2024b4c625c6SSong Liu r5l_recovery_verify_data_checksum(
2025effe6ee7SSong Liu log, ctx, page,
2026b4c625c6SSong Liu r5l_ring_add(log, log_offset,
2027b4c625c6SSong Liu BLOCK_SECTORS),
2028b4c625c6SSong Liu payload->checksum[1]) < 0)
2029b4c625c6SSong Liu goto mismatch;
20301ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20312d4f4687SSong Liu /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
20322d4f4687SSong Liu } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2033b4c625c6SSong Liu goto mismatch;
2034b4c625c6SSong Liu
20351ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
20362d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
20372d4f4687SSong Liu le32_to_cpu(payload_flush->size);
20382d4f4687SSong Liu } else {
20392d4f4687SSong Liu /* DATA or PARITY payload */
2040b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset,
2041b4c625c6SSong Liu le32_to_cpu(payload->size));
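/*
 * payload->size is in sectors; each 4K page (8 sectors) of data carries
 * one __le32 checksum, so a one-page data payload advances mb_offset by
 * sizeof(struct r5l_payload_data_parity) + 1 * sizeof(__le32).
 */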
2042b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) +
2043b4c625c6SSong Liu sizeof(__le32) *
2044b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2045b4c625c6SSong Liu }
2046b4c625c6SSong Liu
20472d4f4687SSong Liu }
20482d4f4687SSong Liu
2049b4c625c6SSong Liu put_page(page);
2050b4c625c6SSong Liu return 0;
2051b4c625c6SSong Liu
2052b4c625c6SSong Liu mismatch:
2053b4c625c6SSong Liu put_page(page);
2054b4c625c6SSong Liu return -EINVAL;
2055b4c625c6SSong Liu }
2056b4c625c6SSong Liu
2057b4c625c6SSong Liu /*
2058b4c625c6SSong Liu * Analyze all data/parity pages in one meta block
2059b4c625c6SSong Liu * Returns:
2060b4c625c6SSong Liu * 0 for success
2061b4c625c6SSong Liu * -EINVAL for unknown payload type
2062b4c625c6SSong Liu * -EAGAIN for checksum mismatch of data page
2063b4c625c6SSong Liu * -ENOMEM for running out of memory (alloc_page failed or out of stripes)
2064b4c625c6SSong Liu */
2065b4c625c6SSong Liu static int
2066b4c625c6SSong Liu r5c_recovery_analyze_meta_block(struct r5l_log *log,
2067b4c625c6SSong Liu struct r5l_recovery_ctx *ctx,
2068b4c625c6SSong Liu struct list_head *cached_stripe_list)
2069b4c625c6SSong Liu {
2070b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2071b4c625c6SSong Liu struct r5conf *conf = mddev->private;
2072b4c625c6SSong Liu struct r5l_meta_block *mb;
2073b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
20742d4f4687SSong Liu struct r5l_payload_flush *payload_flush;
2075b4c625c6SSong Liu int mb_offset;
2076b4c625c6SSong Liu sector_t log_offset;
2077b4c625c6SSong Liu sector_t stripe_sect;
2078b4c625c6SSong Liu struct stripe_head *sh;
2079b4c625c6SSong Liu int ret;
2080b4c625c6SSong Liu
2081b4c625c6SSong Liu /*
2082b4c625c6SSong Liu * for mismatch in data blocks, we will drop all data in this mb, but
2083b4c625c6SSong Liu * we will still read the next mb for other data with the FLUSH flag, as
2084b4c625c6SSong Liu * io_unit could finish out of order.
2085b4c625c6SSong Liu */
2086b4c625c6SSong Liu ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2087b4c625c6SSong Liu if (ret == -EINVAL)
2088b4c625c6SSong Liu return -EAGAIN;
2089b4c625c6SSong Liu else if (ret)
2090b4c625c6SSong Liu return ret; /* -ENOMEM due to alloc_page() failure */
2091b4c625c6SSong Liu
2092b4c625c6SSong Liu mb = page_address(ctx->meta_page);
2093b4c625c6SSong Liu mb_offset = sizeof(struct r5l_meta_block);
2094b4c625c6SSong Liu log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2095b4c625c6SSong Liu
2096b4c625c6SSong Liu while (mb_offset < le32_to_cpu(mb->meta_size)) {
2097b4c625c6SSong Liu int dd;
2098b4c625c6SSong Liu
2099b4c625c6SSong Liu payload = (void *)mb + mb_offset;
21002d4f4687SSong Liu payload_flush = (void *)mb + mb_offset;
21012d4f4687SSong Liu
21021ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
21032d4f4687SSong Liu int i, count;
21042d4f4687SSong Liu
21052d4f4687SSong Liu count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
21062d4f4687SSong Liu for (i = 0; i < count; ++i) {
21072d4f4687SSong Liu stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
21082d4f4687SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
21092d4f4687SSong Liu stripe_sect);
21102d4f4687SSong Liu if (sh) {
21112d4f4687SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
21122d4f4687SSong Liu r5l_recovery_reset_stripe(sh);
21132d4f4687SSong Liu list_del_init(&sh->lru);
21142d4f4687SSong Liu raid5_release_stripe(sh);
21152d4f4687SSong Liu }
21162d4f4687SSong Liu }
21172d4f4687SSong Liu
21182d4f4687SSong Liu mb_offset += sizeof(struct r5l_payload_flush) +
21192d4f4687SSong Liu le32_to_cpu(payload_flush->size);
21202d4f4687SSong Liu continue;
21212d4f4687SSong Liu }
21222d4f4687SSong Liu
21232d4f4687SSong Liu /* DATA or PARITY payload */
21241ad45a9bSJason Yan stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2125b4c625c6SSong Liu raid5_compute_sector(
2126b4c625c6SSong Liu conf, le64_to_cpu(payload->location), 0, &dd,
2127b4c625c6SSong Liu NULL)
2128b4c625c6SSong Liu : le64_to_cpu(payload->location);
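/*
 * For DATA payloads, location is the logical array sector of the data
 * block, which raid5_compute_sector() maps to the stripe's start sector;
 * for PARITY payloads, location already is the stripe sector.
 */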
2129b4c625c6SSong Liu
2130b4c625c6SSong Liu sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2131b4c625c6SSong Liu stripe_sect);
2132b4c625c6SSong Liu
2133b4c625c6SSong Liu if (!sh) {
2134483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2135b4c625c6SSong Liu /*
2136b4c625c6SSong Liu * cannot get stripe from raid5_get_active_stripe
2137b4c625c6SSong Liu * try replaying some stripes
2138b4c625c6SSong Liu */
2139b4c625c6SSong Liu if (!sh) {
2140b4c625c6SSong Liu r5c_recovery_replay_stripes(
2141b4c625c6SSong Liu cached_stripe_list, ctx);
2142b4c625c6SSong Liu sh = r5c_recovery_alloc_stripe(
2143483cbbedSAlexei Naberezhnov conf, stripe_sect, 1);
2144b4c625c6SSong Liu }
2145b4c625c6SSong Liu if (!sh) {
2146483cbbedSAlexei Naberezhnov int new_size = conf->min_nr_stripes * 2;
2147b4c625c6SSong Liu pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2148b4c625c6SSong Liu mdname(mddev),
2149483cbbedSAlexei Naberezhnov new_size);
2150483cbbedSAlexei Naberezhnov ret = raid5_set_cache_size(mddev, new_size);
2151483cbbedSAlexei Naberezhnov if (conf->min_nr_stripes <= new_size / 2) {
2152483cbbedSAlexei Naberezhnov pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2153483cbbedSAlexei Naberezhnov mdname(mddev),
2154483cbbedSAlexei Naberezhnov ret,
2155483cbbedSAlexei Naberezhnov new_size,
2156483cbbedSAlexei Naberezhnov conf->min_nr_stripes,
2157483cbbedSAlexei Naberezhnov conf->max_nr_stripes);
2158483cbbedSAlexei Naberezhnov return -ENOMEM;
2159483cbbedSAlexei Naberezhnov }
2160483cbbedSAlexei Naberezhnov sh = r5c_recovery_alloc_stripe(
2161483cbbedSAlexei Naberezhnov conf, stripe_sect, 0);
2162b4c625c6SSong Liu }
2163b4c625c6SSong Liu if (!sh) {
2164b4c625c6SSong Liu pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2165b4c625c6SSong Liu mdname(mddev));
2166b4c625c6SSong Liu return -ENOMEM;
2167b4c625c6SSong Liu }
2168b4c625c6SSong Liu list_add_tail(&sh->lru, cached_stripe_list);
2169b4c625c6SSong Liu }
2170b4c625c6SSong Liu
21711ad45a9bSJason Yan if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2172f7b7bee7SZhengyuan Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2173f7b7bee7SZhengyuan Liu test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2174b4c625c6SSong Liu r5l_recovery_replay_one_stripe(conf, sh, ctx);
2175b4c625c6SSong Liu list_move_tail(&sh->lru, cached_stripe_list);
2176b4c625c6SSong Liu }
2177b4c625c6SSong Liu r5l_recovery_load_data(log, sh, ctx, payload,
2178b4c625c6SSong Liu log_offset);
21791ad45a9bSJason Yan } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2180b4c625c6SSong Liu r5l_recovery_load_parity(log, sh, ctx, payload,
2181b4c625c6SSong Liu log_offset);
2182b4c625c6SSong Liu else
2183b4c625c6SSong Liu return -EINVAL;
2184b4c625c6SSong Liu
2185b4c625c6SSong Liu log_offset = r5l_ring_add(log, log_offset,
2186b4c625c6SSong Liu le32_to_cpu(payload->size));
2187b4c625c6SSong Liu
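		/*
		 * Step past this payload inside the meta block: the header is
		 * followed by one __le32 checksum per page, and payload->size
		 * counts sectors, so size >> (PAGE_SHIFT - 9) is the number of
		 * pages (and hence checksums).
		 */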
2188b4c625c6SSong Liu mb_offset += sizeof(struct r5l_payload_data_parity) +
2189b4c625c6SSong Liu sizeof(__le32) *
2190b4c625c6SSong Liu (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2191b4c625c6SSong Liu }
2192b4c625c6SSong Liu
2193b4c625c6SSong Liu return 0;
2194b4c625c6SSong Liu }
2195b4c625c6SSong Liu
2196b4c625c6SSong Liu /*
2197b4c625c6SSong Liu * Load the stripe into cache. The stripe will be written out later by
2198b4c625c6SSong Liu * the stripe cache state machine.
2199b4c625c6SSong Liu */
2200b4c625c6SSong Liu static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2201b4c625c6SSong Liu struct stripe_head *sh)
2202b4c625c6SSong Liu {
2203b4c625c6SSong Liu struct r5dev *dev;
2204b4c625c6SSong Liu int i;
2205b4c625c6SSong Liu
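	/*
	 * Blocks that were bound for the journal are, after recovery, the
	 * authoritative copy: mark them in-journal and up to date instead of
	 * scheduling an immediate write-out.
	 */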
2206b4c625c6SSong Liu for (i = sh->disks; i--; ) {
2207b4c625c6SSong Liu dev = sh->dev + i;
2208b4c625c6SSong Liu if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2209b4c625c6SSong Liu set_bit(R5_InJournal, &dev->flags);
2210b4c625c6SSong Liu set_bit(R5_UPTODATE, &dev->flags);
2211b4c625c6SSong Liu }
2212b4c625c6SSong Liu }
2213b4c625c6SSong Liu }
2214b4c625c6SSong Liu
2215b4c625c6SSong Liu /*
2216b4c625c6SSong Liu * Scan through the log for all to-be-flushed data
2217b4c625c6SSong Liu *
2218b4c625c6SSong Liu * For stripes with data and parity, namely Data-Parity stripes
2219b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2220b4c625c6SSong Liu *
2221b4c625c6SSong Liu * For stripes with only data, namely Data-Only stripes
2222b4c625c6SSong Liu * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state machine.
2223b4c625c6SSong Liu *
2224b4c625c6SSong Liu * For a stripe, if we see data after parity, we should discard all previous
2225b4c625c6SSong Liu * data and parity for this stripe, as that data has already been flushed
2226b4c625c6SSong Liu * to the array.
2227b4c625c6SSong Liu *
2228b4c625c6SSong Liu * At the end of the scan, we return the new journal_tail, which points to
2229b4c625c6SSong Liu * the first data-only stripe on the journal device, or to the next invalid
2230b4c625c6SSong Liu * meta block.
 */
2231b4c625c6SSong Liu static int r5c_recovery_flush_log(struct r5l_log *log,
2232b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2233b4c625c6SSong Liu {
2234bc8f167fSJackieLiu struct stripe_head *sh;
2235b4c625c6SSong Liu int ret = 0;
2236b4c625c6SSong Liu
2237b4c625c6SSong Liu /* scan through the log */
2238b4c625c6SSong Liu while (1) {
2239b4c625c6SSong Liu if (r5l_recovery_read_meta_block(log, ctx))
2240b4c625c6SSong Liu break;
2241b4c625c6SSong Liu
2242b4c625c6SSong Liu ret = r5c_recovery_analyze_meta_block(log, ctx,
2243b4c625c6SSong Liu &ctx->cached_list);
2244b4c625c6SSong Liu /*
2245b4c625c6SSong Liu * -EAGAIN means a mismatch in a data block; in this case, we still
2246b4c625c6SSong Liu * try to scan the next meta block.
2247b4c625c6SSong Liu */
2248b4c625c6SSong Liu if (ret && ret != -EAGAIN)
2249b4c625c6SSong Liu break; /* ret == -EINVAL or -ENOMEM */
2250b4c625c6SSong Liu ctx->seq++;
2251b4c625c6SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2252b4c625c6SSong Liu }
2253b4c625c6SSong Liu
2254b4c625c6SSong Liu if (ret == -ENOMEM) {
2255b4c625c6SSong Liu r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2256b4c625c6SSong Liu return ret;
2257b4c625c6SSong Liu }
2258b4c625c6SSong Liu
2259b4c625c6SSong Liu /* replay data-parity stripes */
2260b4c625c6SSong Liu r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2261b4c625c6SSong Liu
2262b4c625c6SSong Liu /* load data-only stripes to stripe cache */
2263bc8f167fSJackieLiu list_for_each_entry(sh, &ctx->cached_list, lru) {
2264b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2265b4c625c6SSong Liu r5c_recovery_load_one_stripe(log, sh);
2266b4c625c6SSong Liu ctx->data_only_stripes++;
2267b4c625c6SSong Liu }
2268b4c625c6SSong Liu
2269b4c625c6SSong Liu return 0;
2270b4c625c6SSong Liu }
2271355810d1SShaohua Li
2272355810d1SShaohua Li /*
2273355810d1SShaohua Li * We did a recovery. Now ctx.pos points to an invalid meta block. The new
2274355810d1SShaohua Li * log will start here. But we can't let the superblock point to the last
2275355810d1SShaohua Li * valid meta block. The log might look like:
2276355810d1SShaohua Li * | meta 1| meta 2| meta 3|
2277355810d1SShaohua Li * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
2278355810d1SShaohua Li * superblock points to meta 1 and we write a new valid meta 2n, then after
2279355810d1SShaohua Li * another crash the new recovery will start from meta 1. Since meta 2n is
2280355810d1SShaohua Li * valid now, recovery will think meta 3 is valid too, which is wrong.
2281355810d1SShaohua Li * The solution is to create a new meta in place of meta 2 with its seq ==
22823c6edc66SSong Liu * meta 1's seq + 10000 and let the superblock point to it. That recovery
22833c6edc66SSong Liu * will not treat meta 3 as a valid meta, because its seq doesn't match.
2284355810d1SShaohua Li */
2285355810d1SShaohua Li
2286b4c625c6SSong Liu /*
2287b4c625c6SSong Liu * Before recovery, the log looks like the following
2288b4c625c6SSong Liu *
2289b4c625c6SSong Liu * ---------------------------------------------
2290b4c625c6SSong Liu * | valid log | invalid log |
2291b4c625c6SSong Liu * ---------------------------------------------
2292b4c625c6SSong Liu * ^
2293b4c625c6SSong Liu * |- log->last_checkpoint
2294b4c625c6SSong Liu * |- log->last_cp_seq
2295b4c625c6SSong Liu *
2296b4c625c6SSong Liu * Now we scan through the log until we see an invalid entry
2297b4c625c6SSong Liu *
2298b4c625c6SSong Liu * ---------------------------------------------
2299b4c625c6SSong Liu * | valid log | invalid log |
2300b4c625c6SSong Liu * ---------------------------------------------
2301b4c625c6SSong Liu * ^ ^
2302b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos
2303b4c625c6SSong Liu * |- log->last_cp_seq |- ctx->seq
2304b4c625c6SSong Liu *
2305b4c625c6SSong Liu * From this point, we need to increase the seq number by 10000 to avoid
2306b4c625c6SSong Liu * confusing the next recovery.
2307b4c625c6SSong Liu *
2308b4c625c6SSong Liu * ---------------------------------------------
2309b4c625c6SSong Liu * | valid log | invalid log |
2310b4c625c6SSong Liu * ---------------------------------------------
2311b4c625c6SSong Liu * ^ ^
2312b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+1
23133c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10001
2314b4c625c6SSong Liu *
2315b4c625c6SSong Liu * However, it is not safe to start the state machine yet, because the data
2316b4c625c6SSong Liu * of data-only stripes is not yet secured in the RAID. To save that data, we
2317b4c625c6SSong Liu * rewrite the data-only stripes to the journal, starting from seq+10001.
2318b4c625c6SSong Liu *
2319b4c625c6SSong Liu * -----------------------------------------------------------------
2320b4c625c6SSong Liu * | valid log | data only stripes | invalid log |
2321b4c625c6SSong Liu * -----------------------------------------------------------------
2322b4c625c6SSong Liu * ^ ^
2323b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+n
23243c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10000+n
2325b4c625c6SSong Liu *
2326b4c625c6SSong Liu * If failure happens again during this process, the recovery can safely
2327b4c625c6SSong Liu * start again from log->last_checkpoint.
2328b4c625c6SSong Liu *
2329b4c625c6SSong Liu * Once the data-only stripes are rewritten to the journal, we move log_tail
2330b4c625c6SSong Liu *
2331b4c625c6SSong Liu * -----------------------------------------------------------------
2332b4c625c6SSong Liu * | old log | data only stripes | invalid log |
2333b4c625c6SSong Liu * -----------------------------------------------------------------
2334b4c625c6SSong Liu * ^ ^
2335b4c625c6SSong Liu * |- log->last_checkpoint |- ctx->pos+n
23363c6edc66SSong Liu * |- log->last_cp_seq |- ctx->seq+10000+n
2337b4c625c6SSong Liu *
2338b4c625c6SSong Liu * Then we can safely start the state machine. If failure happens from this
2339b4c625c6SSong Liu * point on, the recovery will start from the new log->last_checkpoint.
2340b4c625c6SSong Liu */
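/*
 * Worked example with illustrative numbers: if the scan stops with
 * ctx->seq == 42, the +10000 jump makes the next meta block written
 * (an empty block, or the first rewritten data-only stripe) carry seq
 * 10042, with later rewrites at 10043, 10044, ... A stale meta block
 * carrying seq 43 from before the crash can then never be mistaken
 * for a continuation of the new log.
 */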
2341b4c625c6SSong Liu static int
2342b4c625c6SSong Liu r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2343b4c625c6SSong Liu struct r5l_recovery_ctx *ctx)
2344b4c625c6SSong Liu {
2345a85dd7b8SSong Liu struct stripe_head *sh;
2346b4c625c6SSong Liu struct mddev *mddev = log->rdev->mddev;
2347b4c625c6SSong Liu struct page *page;
23483c66abbaSSong Liu sector_t next_checkpoint = MaxSector;
2349b4c625c6SSong Liu
2350b4c625c6SSong Liu page = alloc_page(GFP_KERNEL);
2351b4c625c6SSong Liu if (!page) {
2352b4c625c6SSong Liu pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2353b4c625c6SSong Liu mdname(mddev));
2354b4c625c6SSong Liu return -ENOMEM;
2355b4c625c6SSong Liu }
2356b4c625c6SSong Liu
23573c66abbaSSong Liu WARN_ON(list_empty(&ctx->cached_list));
23583c66abbaSSong Liu
2359a85dd7b8SSong Liu list_for_each_entry(sh, &ctx->cached_list, lru) {
2360b4c625c6SSong Liu struct r5l_meta_block *mb;
2361b4c625c6SSong Liu int i;
2362b4c625c6SSong Liu int offset;
2363b4c625c6SSong Liu sector_t write_pos;
2364b4c625c6SSong Liu
2365b4c625c6SSong Liu WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2366b4c625c6SSong Liu r5l_recovery_create_empty_meta_block(log, page,
2367b4c625c6SSong Liu ctx->pos, ctx->seq);
2368b4c625c6SSong Liu mb = page_address(page);
2369b4c625c6SSong Liu offset = le32_to_cpu(mb->meta_size);
2370fc833c2aSJackieLiu write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2371b4c625c6SSong Liu
2372b4c625c6SSong Liu for (i = sh->disks; i--; ) {
2373b4c625c6SSong Liu struct r5dev *dev = &sh->dev[i];
2374b4c625c6SSong Liu struct r5l_payload_data_parity *payload;
2375b4c625c6SSong Liu void *addr;
2376b4c625c6SSong Liu
2377b4c625c6SSong Liu if (test_bit(R5_InJournal, &dev->flags)) {
2378b4c625c6SSong Liu payload = (void *)mb + offset;
2379b4c625c6SSong Liu payload->header.type = cpu_to_le16(
2380b4c625c6SSong Liu R5LOG_PAYLOAD_DATA);
23811ad45a9bSJason Yan payload->size = cpu_to_le32(BLOCK_SECTORS);
2382b4c625c6SSong Liu payload->location = cpu_to_le64(
2383b4c625c6SSong Liu raid5_compute_blocknr(sh, i, 0));
2384b4c625c6SSong Liu addr = kmap_atomic(dev->page);
2385b4c625c6SSong Liu payload->checksum[0] = cpu_to_le32(
2386b4c625c6SSong Liu crc32c_le(log->uuid_checksum, addr,
2387b4c625c6SSong Liu PAGE_SIZE));
2388b4c625c6SSong Liu kunmap_atomic(addr);
2389b4c625c6SSong Liu sync_page_io(log->rdev, write_pos, PAGE_SIZE,
23904ce4c73fSBart Van Assche dev->page, REQ_OP_WRITE, false);
2391b4c625c6SSong Liu write_pos = r5l_ring_add(log, write_pos,
2392b4c625c6SSong Liu BLOCK_SECTORS);
2393b4c625c6SSong Liu offset += sizeof(__le32) +
2394b4c625c6SSong Liu sizeof(struct r5l_payload_data_parity);
2395b4c625c6SSong Liu
2396b4c625c6SSong Liu }
2397b4c625c6SSong Liu }
2398b4c625c6SSong Liu mb->meta_size = cpu_to_le32(offset);
23995c88f403SSong Liu mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
24005c88f403SSong Liu mb, PAGE_SIZE));
2401b4c625c6SSong Liu sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
24024ce4c73fSBart Van Assche REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2403b4c625c6SSong Liu sh->log_start = ctx->pos;
24043c66abbaSSong Liu list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
24053c66abbaSSong Liu atomic_inc(&log->stripe_in_journal_count);
2406b4c625c6SSong Liu ctx->pos = write_pos;
2407b4c625c6SSong Liu ctx->seq += 1;
24083c66abbaSSong Liu next_checkpoint = sh->log_start;
2409b4c625c6SSong Liu }
24103c66abbaSSong Liu log->next_checkpoint = next_checkpoint;
2411b4c625c6SSong Liu __free_page(page);
2412b4c625c6SSong Liu return 0;
2413b4c625c6SSong Liu }
2414b4c625c6SSong Liu
2415a85dd7b8SSong Liu static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2416a85dd7b8SSong Liu struct r5l_recovery_ctx *ctx)
2417a85dd7b8SSong Liu {
2418a85dd7b8SSong Liu struct mddev *mddev = log->rdev->mddev;
2419a85dd7b8SSong Liu struct r5conf *conf = mddev->private;
2420a85dd7b8SSong Liu struct stripe_head *sh, *next;
2421c9020e64SSong Liu bool cleared_pending = false;
2422a85dd7b8SSong Liu
2423a85dd7b8SSong Liu if (ctx->data_only_stripes == 0)
2424a85dd7b8SSong Liu return;
2425a85dd7b8SSong Liu
2426c9020e64SSong Liu if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2427c9020e64SSong Liu cleared_pending = true;
2428c9020e64SSong Liu clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2429c9020e64SSong Liu }
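	/*
	 * Stripes make no progress through handle_stripe() while
	 * MD_SB_CHANGE_PENDING is set, so the flag is dropped for the
	 * duration of the flush and restored afterwards.
	 */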
2430a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2431a85dd7b8SSong Liu
2432a85dd7b8SSong Liu list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2433a85dd7b8SSong Liu r5c_make_stripe_write_out(sh);
2434a85dd7b8SSong Liu set_bit(STRIPE_HANDLE, &sh->state);
2435a85dd7b8SSong Liu list_del_init(&sh->lru);
2436a85dd7b8SSong Liu raid5_release_stripe(sh);
2437a85dd7b8SSong Liu }
2438a85dd7b8SSong Liu
2439a85dd7b8SSong Liu /* reuse conf->wait_for_quiescent in recovery */
2440a85dd7b8SSong Liu wait_event(conf->wait_for_quiescent,
2441a85dd7b8SSong Liu atomic_read(&conf->active_stripes) == 0);
2442a85dd7b8SSong Liu
2443a85dd7b8SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2444c9020e64SSong Liu if (cleared_pending)
2445c9020e64SSong Liu set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2446a85dd7b8SSong Liu }
2447a85dd7b8SSong Liu
2448f6bed0efSShaohua Li static int r5l_recovery_log(struct r5l_log *log)
2449f6bed0efSShaohua Li {
24505aabf7c4SSong Liu struct mddev *mddev = log->rdev->mddev;
2451effe6ee7SSong Liu struct r5l_recovery_ctx *ctx;
24525aabf7c4SSong Liu int ret;
245343b96748SJackieLiu sector_t pos;
2454355810d1SShaohua Li
2455effe6ee7SSong Liu ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2456effe6ee7SSong Liu if (!ctx)
2457355810d1SShaohua Li return -ENOMEM;
2458355810d1SShaohua Li
2459effe6ee7SSong Liu ctx->pos = log->last_checkpoint;
2460effe6ee7SSong Liu ctx->seq = log->last_cp_seq;
2461effe6ee7SSong Liu INIT_LIST_HEAD(&ctx->cached_list);
2462effe6ee7SSong Liu ctx->meta_page = alloc_page(GFP_KERNEL);
2463effe6ee7SSong Liu
2464effe6ee7SSong Liu if (!ctx->meta_page) {
2465effe6ee7SSong Liu ret = -ENOMEM;
2466effe6ee7SSong Liu goto meta_page;
2467effe6ee7SSong Liu }
2468effe6ee7SSong Liu
2469effe6ee7SSong Liu if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2470effe6ee7SSong Liu ret = -ENOMEM;
2471effe6ee7SSong Liu goto ra_pool;
2472effe6ee7SSong Liu }
2473effe6ee7SSong Liu
2474effe6ee7SSong Liu ret = r5c_recovery_flush_log(log, ctx);
2475355810d1SShaohua Li
2476355810d1SShaohua Li if (ret)
2477effe6ee7SSong Liu goto error;
24785aabf7c4SSong Liu
2479effe6ee7SSong Liu pos = ctx->pos;
2480effe6ee7SSong Liu ctx->seq += 10000;
248143b96748SJackieLiu
2482effe6ee7SSong Liu if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
248392e6245dSSong Liu pr_info("md/raid:%s: starting from clean shutdown\n",
24845aabf7c4SSong Liu mdname(mddev));
2485a85dd7b8SSong Liu else
248692e6245dSSong Liu pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2487effe6ee7SSong Liu mdname(mddev), ctx->data_only_stripes,
2488effe6ee7SSong Liu ctx->data_parity_stripes);
24895aabf7c4SSong Liu
2490effe6ee7SSong Liu if (ctx->data_only_stripes == 0) {
2491effe6ee7SSong Liu log->next_checkpoint = ctx->pos;
2492effe6ee7SSong Liu r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2493effe6ee7SSong Liu ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2494effe6ee7SSong Liu } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
24955aabf7c4SSong Liu pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
24965aabf7c4SSong Liu mdname(mddev));
2497effe6ee7SSong Liu ret = -EIO;
2498effe6ee7SSong Liu goto error;
24995aabf7c4SSong Liu }
25005aabf7c4SSong Liu
2501effe6ee7SSong Liu log->log_start = ctx->pos;
2502effe6ee7SSong Liu log->seq = ctx->seq;
250343b96748SJackieLiu log->last_checkpoint = pos;
250443b96748SJackieLiu r5l_write_super(log, pos);
2505a85dd7b8SSong Liu
2506effe6ee7SSong Liu r5c_recovery_flush_data_only_stripes(log, ctx);
2507effe6ee7SSong Liu ret = 0;
2508effe6ee7SSong Liu error:
2509effe6ee7SSong Liu r5l_recovery_free_ra_pool(log, ctx);
2510effe6ee7SSong Liu ra_pool:
2511effe6ee7SSong Liu __free_page(ctx->meta_page);
2512effe6ee7SSong Liu meta_page:
2513effe6ee7SSong Liu kfree(ctx);
2514effe6ee7SSong Liu return ret;
2515f6bed0efSShaohua Li }
2516f6bed0efSShaohua Li
2517f6bed0efSShaohua Li static void r5l_write_super(struct r5l_log *log, sector_t cp)
2518f6bed0efSShaohua Li {
2519f6bed0efSShaohua Li struct mddev *mddev = log->rdev->mddev;
2520f6bed0efSShaohua Li
2521f6bed0efSShaohua Li log->rdev->journal_tail = cp;
25222953079cSShaohua Li set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2523f6bed0efSShaohua Li }
2524f6bed0efSShaohua Li
25252c7da14bSSong Liu static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
25262c7da14bSSong Liu {
2527a72cbf83SSong Liu struct r5conf *conf;
25282c7da14bSSong Liu int ret;
25292c7da14bSSong Liu
253078ede6a0SLogan Gunthorpe ret = mddev_lock(mddev);
253178ede6a0SLogan Gunthorpe if (ret)
253278ede6a0SLogan Gunthorpe return ret;
253378ede6a0SLogan Gunthorpe
2534a72cbf83SSong Liu conf = mddev->private;
253578ede6a0SLogan Gunthorpe if (!conf || !conf->log)
253678ede6a0SLogan Gunthorpe goto out_unlock;
25372c7da14bSSong Liu
25382c7da14bSSong Liu switch (conf->log->r5c_journal_mode) {
25392c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_THROUGH:
25402c7da14bSSong Liu ret = snprintf(
25412c7da14bSSong Liu page, PAGE_SIZE, "[%s] %s\n",
25422c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
25432c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
25442c7da14bSSong Liu break;
25452c7da14bSSong Liu case R5C_JOURNAL_MODE_WRITE_BACK:
25462c7da14bSSong Liu ret = snprintf(
25472c7da14bSSong Liu page, PAGE_SIZE, "%s [%s]\n",
25482c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
25492c7da14bSSong Liu r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
25502c7da14bSSong Liu break;
25512c7da14bSSong Liu default:
25522c7da14bSSong Liu ret = 0;
25532c7da14bSSong Liu }
255478ede6a0SLogan Gunthorpe
255578ede6a0SLogan Gunthorpe out_unlock:
255678ede6a0SLogan Gunthorpe mddev_unlock(mddev);
25572c7da14bSSong Liu return ret;
25582c7da14bSSong Liu }
25592c7da14bSSong Liu
256078e470c2SHeinz Mauelshagen /*
256178e470c2SHeinz Mauelshagen * Set journal cache mode on @mddev (external API initially needed by dm-raid).
256278e470c2SHeinz Mauelshagen *
256378e470c2SHeinz Mauelshagen * @mode as defined in 'enum r5c_journal_mode'.
256478e470c2SHeinz Mauelshagen *
256578e470c2SHeinz Mauelshagen */
256678e470c2SHeinz Mauelshagen int r5c_journal_mode_set(struct mddev *mddev, int mode)
25672c7da14bSSong Liu {
2568b44886c5SSong Liu struct r5conf *conf;
25692c7da14bSSong Liu
257078e470c2SHeinz Mauelshagen if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
257178e470c2SHeinz Mauelshagen mode > R5C_JOURNAL_MODE_WRITE_BACK)
25722c7da14bSSong Liu return -EINVAL;
25732c7da14bSSong Liu
2574b44886c5SSong Liu conf = mddev->private;
2575ff35f58eSSong Liu if (!conf || !conf->log)
2576b44886c5SSong Liu return -ENODEV;
2577b44886c5SSong Liu
25782e38a37fSSong Liu if (raid5_calc_degraded(conf) > 0 &&
2579ff35f58eSSong Liu mode == R5C_JOURNAL_MODE_WRITE_BACK)
25802e38a37fSSong Liu return -EINVAL;
25812e38a37fSSong Liu
258278e470c2SHeinz Mauelshagen conf->log->r5c_journal_mode = mode;
25832c7da14bSSong Liu
25842c7da14bSSong Liu pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
258578e470c2SHeinz Mauelshagen mdname(mddev), mode, r5c_journal_mode_str[mode]);
258678e470c2SHeinz Mauelshagen return 0;
258778e470c2SHeinz Mauelshagen }
258878e470c2SHeinz Mauelshagen EXPORT_SYMBOL(r5c_journal_mode_set);
258978e470c2SHeinz Mauelshagen
259078e470c2SHeinz Mauelshagen static ssize_t r5c_journal_mode_store(struct mddev *mddev,
259178e470c2SHeinz Mauelshagen const char *page, size_t length)
259278e470c2SHeinz Mauelshagen {
259378e470c2SHeinz Mauelshagen int mode = ARRAY_SIZE(r5c_journal_mode_str);
259478e470c2SHeinz Mauelshagen size_t len = length;
2595ff35f58eSSong Liu int ret;
259678e470c2SHeinz Mauelshagen
259778e470c2SHeinz Mauelshagen if (len < 2)
259878e470c2SHeinz Mauelshagen return -EINVAL;
259978e470c2SHeinz Mauelshagen
260078e470c2SHeinz Mauelshagen if (page[len - 1] == '\n')
260178e470c2SHeinz Mauelshagen len--;
260278e470c2SHeinz Mauelshagen
260378e470c2SHeinz Mauelshagen while (mode--)
260478e470c2SHeinz Mauelshagen if (strlen(r5c_journal_mode_str[mode]) == len &&
260578e470c2SHeinz Mauelshagen !strncmp(page, r5c_journal_mode_str[mode], len))
260678e470c2SHeinz Mauelshagen break;
26071b172e0bSYu Kuai ret = mddev_suspend_and_lock(mddev);
2608ff35f58eSSong Liu if (ret)
2609ff35f58eSSong Liu return ret;
2610ff35f58eSSong Liu ret = r5c_journal_mode_set(mddev, mode);
26111b172e0bSYu Kuai mddev_unlock_and_resume(mddev);
2612ff35f58eSSong Liu return ret ?: length;
26132c7da14bSSong Liu }
26142c7da14bSSong Liu
26152c7da14bSSong Liu struct md_sysfs_entry
26162c7da14bSSong Liu r5c_journal_mode = __ATTR(journal_mode, 0644,
26172c7da14bSSong Liu r5c_journal_mode_show, r5c_journal_mode_store);
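/*
 * Illustrative usage from userspace (md0 is a placeholder device name):
 *
 *   $ cat /sys/block/md0/md/journal_mode
 *   [write-through] write-back
 *   $ echo write-back > /sys/block/md0/md/journal_mode
 */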
26182c7da14bSSong Liu
26192ded3703SSong Liu /*
26202ded3703SSong Liu * Try to handle a write operation in the caching phase. This function
26212ded3703SSong Liu * should only be called in write-back mode.
26222ded3703SSong Liu *
26232ded3703SSong Liu * If all outstanding writes can be handled in the caching phase, returns 0.
26242ded3703SSong Liu * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
26252ded3703SSong Liu * and returns -EAGAIN.
26262ded3703SSong Liu */
26272ded3703SSong Liu int r5c_try_caching_write(struct r5conf *conf,
26282ded3703SSong Liu struct stripe_head *sh,
26292ded3703SSong Liu struct stripe_head_state *s,
26302ded3703SSong Liu int disks)
26312ded3703SSong Liu {
263206a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
26331e6d690bSSong Liu int i;
26341e6d690bSSong Liu struct r5dev *dev;
26351e6d690bSSong Liu int to_cache = 0;
26366f28c5c3SLogan Gunthorpe void __rcu **pslot;
263703b047f4SSong Liu sector_t tree_index;
263803b047f4SSong Liu int ret;
263903b047f4SSong Liu uintptr_t refcount;
26402ded3703SSong Liu
26412ded3703SSong Liu BUG_ON(!r5c_is_writeback(log));
26422ded3703SSong Liu
26431e6d690bSSong Liu if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
26441e6d690bSSong Liu /*
26451e6d690bSSong Liu * There are two different scenarios here:
26461e6d690bSSong Liu * 1. The stripe has some data cached, and it is sent to
26471e6d690bSSong Liu * write-out phase for reclaim
26481e6d690bSSong Liu * 2. The stripe is clean, and this is the first write
26491e6d690bSSong Liu *
26501e6d690bSSong Liu * For 1, return -EAGAIN, so we continue with
26511e6d690bSSong Liu * handle_stripe_dirtying().
26521e6d690bSSong Liu *
26531e6d690bSSong Liu * For 2, set STRIPE_R5C_CACHING and continue with caching
26541e6d690bSSong Liu * write.
26551e6d690bSSong Liu */
26561e6d690bSSong Liu
26571e6d690bSSong Liu /* case 1: anything in s->injournal or anything in s->written */
26581e6d690bSSong Liu if (s->injournal > 0 || s->written > 0)
26591e6d690bSSong Liu return -EAGAIN;
26601e6d690bSSong Liu /* case 2 */
26611e6d690bSSong Liu set_bit(STRIPE_R5C_CACHING, &sh->state);
26621e6d690bSSong Liu }
26631e6d690bSSong Liu
26642e38a37fSSong Liu /*
26652e38a37fSSong Liu * When run in degraded mode, the array is set to write-through mode.
26662e38a37fSSong Liu * This check helps drain pending writes safely during the transition
26672e38a37fSSong Liu * to write-through mode.
26685ddf0440SSong Liu *
26695ddf0440SSong Liu * When a stripe is syncing, the write is also handled in
26705ddf0440SSong Liu * write-through mode.
26712e38a37fSSong Liu */
26725ddf0440SSong Liu if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
26732e38a37fSSong Liu r5c_make_stripe_write_out(sh);
26742e38a37fSSong Liu return -EAGAIN;
26752e38a37fSSong Liu }
26762e38a37fSSong Liu
26771e6d690bSSong Liu for (i = disks; i--; ) {
26781e6d690bSSong Liu dev = &sh->dev[i];
26791e6d690bSSong Liu /* if non-overwrite, use writing-out phase */
26801e6d690bSSong Liu if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
26811e6d690bSSong Liu !test_bit(R5_InJournal, &dev->flags)) {
26822ded3703SSong Liu r5c_make_stripe_write_out(sh);
26832ded3703SSong Liu return -EAGAIN;
26842ded3703SSong Liu }
26851e6d690bSSong Liu }
26861e6d690bSSong Liu
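	/*
	 * big_stripe_tree slots hold a bare reference count rather than a
	 * pointer: the count lives in the upper bits of the slot value,
	 *
	 *   slot  = (void *)(count << R5C_RADIX_COUNT_SHIFT);
	 *   count = (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
	 *
	 * so the low bits the radix tree reserves for internal entries
	 * stay clear.
	 */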
268703b047f4SSong Liu /* if the stripe is not counted in big_stripe_tree, add it now */
268803b047f4SSong Liu if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
268903b047f4SSong Liu !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
269003b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector);
269103b047f4SSong Liu spin_lock(&log->tree_lock);
269203b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
269303b047f4SSong Liu tree_index);
269403b047f4SSong Liu if (pslot) {
269503b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected(
269603b047f4SSong Liu pslot, &log->tree_lock) >>
269703b047f4SSong Liu R5C_RADIX_COUNT_SHIFT;
269803b047f4SSong Liu radix_tree_replace_slot(
269903b047f4SSong Liu &log->big_stripe_tree, pslot,
270003b047f4SSong Liu (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
270103b047f4SSong Liu } else {
270203b047f4SSong Liu /*
270303b047f4SSong Liu * this radix_tree_insert can fail safely, so no
270403b047f4SSong Liu * need to call radix_tree_preload()
270503b047f4SSong Liu */
270603b047f4SSong Liu ret = radix_tree_insert(
270703b047f4SSong Liu &log->big_stripe_tree, tree_index,
270803b047f4SSong Liu (void *)(1 << R5C_RADIX_COUNT_SHIFT));
270903b047f4SSong Liu if (ret) {
271003b047f4SSong Liu spin_unlock(&log->tree_lock);
271103b047f4SSong Liu r5c_make_stripe_write_out(sh);
271203b047f4SSong Liu return -EAGAIN;
271303b047f4SSong Liu }
271403b047f4SSong Liu }
271503b047f4SSong Liu spin_unlock(&log->tree_lock);
271603b047f4SSong Liu
271703b047f4SSong Liu /*
271803b047f4SSong Liu * Set STRIPE_R5C_PARTIAL_STRIPE; this shows that the stripe is
271903b047f4SSong Liu * counted in the radix tree.
272003b047f4SSong Liu */
272103b047f4SSong Liu set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
272203b047f4SSong Liu atomic_inc(&conf->r5c_cached_partial_stripes);
272303b047f4SSong Liu }
272403b047f4SSong Liu
27251e6d690bSSong Liu for (i = disks; i--; ) {
27261e6d690bSSong Liu dev = &sh->dev[i];
27271e6d690bSSong Liu if (dev->towrite) {
27281e6d690bSSong Liu set_bit(R5_Wantwrite, &dev->flags);
27291e6d690bSSong Liu set_bit(R5_Wantdrain, &dev->flags);
27301e6d690bSSong Liu set_bit(R5_LOCKED, &dev->flags);
27311e6d690bSSong Liu to_cache++;
27321e6d690bSSong Liu }
27331e6d690bSSong Liu }
27341e6d690bSSong Liu
27351e6d690bSSong Liu if (to_cache) {
27361e6d690bSSong Liu set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
27371e6d690bSSong Liu /*
27381e6d690bSSong Liu * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
27391e6d690bSSong Liu * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
27401e6d690bSSong Liu * r5c_handle_data_cached()
27411e6d690bSSong Liu */
27421e6d690bSSong Liu set_bit(STRIPE_LOG_TRAPPED, &sh->state);
27431e6d690bSSong Liu }
27441e6d690bSSong Liu
27451e6d690bSSong Liu return 0;
27461e6d690bSSong Liu }
27471e6d690bSSong Liu
27481e6d690bSSong Liu /*
27491e6d690bSSong Liu * free the extra pages (orig_page) we allocated for prexor
27501e6d690bSSong Liu */
27511e6d690bSSong Liu void r5c_release_extra_page(struct stripe_head *sh)
27521e6d690bSSong Liu {
2753d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf;
27541e6d690bSSong Liu int i;
2755d7bd398eSSong Liu bool using_disk_info_extra_page;
2756d7bd398eSSong Liu
2757d7bd398eSSong Liu using_disk_info_extra_page =
2758d7bd398eSSong Liu sh->dev[0].orig_page == conf->disks[0].extra_page;
27591e6d690bSSong Liu
27601e6d690bSSong Liu for (i = sh->disks; i--; )
27611e6d690bSSong Liu if (sh->dev[i].page != sh->dev[i].orig_page) {
27621e6d690bSSong Liu struct page *p = sh->dev[i].orig_page;
27631e6d690bSSong Liu
27641e6d690bSSong Liu sh->dev[i].orig_page = sh->dev[i].page;
276586aa1397SSong Liu clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
276686aa1397SSong Liu
2767d7bd398eSSong Liu if (!using_disk_info_extra_page)
27681e6d690bSSong Liu put_page(p);
27691e6d690bSSong Liu }
2770d7bd398eSSong Liu
2771d7bd398eSSong Liu if (using_disk_info_extra_page) {
2772d7bd398eSSong Liu clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2773d7bd398eSSong Liu md_wakeup_thread(conf->mddev->thread);
2774d7bd398eSSong Liu }
2775d7bd398eSSong Liu }
2776d7bd398eSSong Liu
2777d7bd398eSSong Liu void r5c_use_extra_page(struct stripe_head *sh)
2778d7bd398eSSong Liu {
2779d7bd398eSSong Liu struct r5conf *conf = sh->raid_conf;
2780d7bd398eSSong Liu int i;
2781d7bd398eSSong Liu struct r5dev *dev;
2782d7bd398eSSong Liu
2783d7bd398eSSong Liu for (i = sh->disks; i--; ) {
2784d7bd398eSSong Liu dev = &sh->dev[i];
2785d7bd398eSSong Liu if (dev->orig_page != dev->page)
2786d7bd398eSSong Liu put_page(dev->orig_page);
2787d7bd398eSSong Liu dev->orig_page = conf->disks[i].extra_page;
2788d7bd398eSSong Liu }
27891e6d690bSSong Liu }
27902ded3703SSong Liu
27912ded3703SSong Liu /*
27922ded3703SSong Liu * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
27932ded3703SSong Liu * stripe is committed to RAID disks.
27942ded3703SSong Liu */
27952ded3703SSong Liu void r5c_finish_stripe_write_out(struct r5conf *conf,
27962ded3703SSong Liu struct stripe_head *sh,
27972ded3703SSong Liu struct stripe_head_state *s)
27982ded3703SSong Liu {
279906a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
28001e6d690bSSong Liu int i;
28011e6d690bSSong Liu int do_wakeup = 0;
280203b047f4SSong Liu sector_t tree_index;
28036f28c5c3SLogan Gunthorpe void __rcu **pslot;
280403b047f4SSong Liu uintptr_t refcount;
28051e6d690bSSong Liu
280603b047f4SSong Liu if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
28072ded3703SSong Liu return;
28082ded3703SSong Liu
28092ded3703SSong Liu WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
28102ded3703SSong Liu clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
28112ded3703SSong Liu
281203b047f4SSong Liu if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
28132ded3703SSong Liu return;
28141e6d690bSSong Liu
28151e6d690bSSong Liu for (i = sh->disks; i--; ) {
28161e6d690bSSong Liu clear_bit(R5_InJournal, &sh->dev[i].flags);
28171e6d690bSSong Liu if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
28181e6d690bSSong Liu do_wakeup = 1;
28191e6d690bSSong Liu }
28201e6d690bSSong Liu
28211e6d690bSSong Liu /*
28221e6d690bSSong Liu * analyse_stripe() runs before r5c_finish_stripe_write_out(); since
28231e6d690bSSong Liu * we updated R5_InJournal above, we also update s->injournal here.
28241e6d690bSSong Liu */
28251e6d690bSSong Liu s->injournal = 0;
28261e6d690bSSong Liu
28271e6d690bSSong Liu if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
28281e6d690bSSong Liu if (atomic_dec_and_test(&conf->pending_full_writes))
28291e6d690bSSong Liu md_wakeup_thread(conf->mddev->thread);
28301e6d690bSSong Liu
28311e6d690bSSong Liu if (do_wakeup)
28321e6d690bSSong Liu wake_up(&conf->wait_for_overlap);
2833a39f7afdSSong Liu
283403b047f4SSong Liu spin_lock_irq(&log->stripe_in_journal_lock);
2835a39f7afdSSong Liu list_del_init(&sh->r5c);
283603b047f4SSong Liu spin_unlock_irq(&log->stripe_in_journal_lock);
2837a39f7afdSSong Liu sh->log_start = MaxSector;
283803b047f4SSong Liu
283903b047f4SSong Liu atomic_dec(&log->stripe_in_journal_count);
284003b047f4SSong Liu r5c_update_log_state(log);
284103b047f4SSong Liu
284203b047f4SSong Liu /* stop counting this stripe in big_stripe_tree */
284303b047f4SSong Liu if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
284403b047f4SSong Liu test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
284503b047f4SSong Liu tree_index = r5c_tree_index(conf, sh->sector);
284603b047f4SSong Liu spin_lock(&log->tree_lock);
284703b047f4SSong Liu pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
284803b047f4SSong Liu tree_index);
284903b047f4SSong Liu BUG_ON(pslot == NULL);
285003b047f4SSong Liu refcount = (uintptr_t)radix_tree_deref_slot_protected(
285103b047f4SSong Liu pslot, &log->tree_lock) >>
285203b047f4SSong Liu R5C_RADIX_COUNT_SHIFT;
285303b047f4SSong Liu if (refcount == 1)
285403b047f4SSong Liu radix_tree_delete(&log->big_stripe_tree, tree_index);
285503b047f4SSong Liu else
285603b047f4SSong Liu radix_tree_replace_slot(
285703b047f4SSong Liu &log->big_stripe_tree, pslot,
285803b047f4SSong Liu (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
285903b047f4SSong Liu spin_unlock(&log->tree_lock);
286003b047f4SSong Liu }
286103b047f4SSong Liu
286203b047f4SSong Liu if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
286303b047f4SSong Liu BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2864e33fbb9cSShaohua Li atomic_dec(&conf->r5c_flushing_partial_stripes);
286503b047f4SSong Liu atomic_dec(&conf->r5c_cached_partial_stripes);
286603b047f4SSong Liu }
286703b047f4SSong Liu
286803b047f4SSong Liu if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
286903b047f4SSong Liu BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2870e33fbb9cSShaohua Li atomic_dec(&conf->r5c_flushing_full_stripes);
287103b047f4SSong Liu atomic_dec(&conf->r5c_cached_full_stripes);
287203b047f4SSong Liu }
2873ea17481fSSong Liu
2874ea17481fSSong Liu r5l_append_flush_payload(log, sh->sector);
28755ddf0440SSong Liu /* the stripe is flushed to the raid disks; we can do resync now */
28765ddf0440SSong Liu if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
28775ddf0440SSong Liu set_bit(STRIPE_HANDLE, &sh->state);
28781e6d690bSSong Liu }
28791e6d690bSSong Liu
2880ff875738SArtur Paszkiewicz int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
28811e6d690bSSong Liu {
2882a39f7afdSSong Liu struct r5conf *conf = sh->raid_conf;
28831e6d690bSSong Liu int pages = 0;
28841e6d690bSSong Liu int reserve;
28851e6d690bSSong Liu int i;
28861e6d690bSSong Liu int ret = 0;
28871e6d690bSSong Liu
28881e6d690bSSong Liu BUG_ON(!log);
28891e6d690bSSong Liu
28901e6d690bSSong Liu for (i = 0; i < sh->disks; i++) {
28911e6d690bSSong Liu void *addr;
28921e6d690bSSong Liu
28931e6d690bSSong Liu if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
28941e6d690bSSong Liu continue;
28951e6d690bSSong Liu addr = kmap_atomic(sh->dev[i].page);
28961e6d690bSSong Liu sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
28971e6d690bSSong Liu addr, PAGE_SIZE);
28981e6d690bSSong Liu kunmap_atomic(addr);
28991e6d690bSSong Liu pages++;
29001e6d690bSSong Liu }
29011e6d690bSSong Liu WARN_ON(pages == 0);
29021e6d690bSSong Liu
29031e6d690bSSong Liu /*
29041e6d690bSSong Liu * The stripe must enter the state machine again to call endio, so
29051e6d690bSSong Liu * don't delay.
29061e6d690bSSong Liu */
29071e6d690bSSong Liu clear_bit(STRIPE_DELAYED, &sh->state);
29081e6d690bSSong Liu atomic_inc(&sh->count);
29091e6d690bSSong Liu
29101e6d690bSSong Liu mutex_lock(&log->io_mutex);
29111e6d690bSSong Liu /* one meta block + the data pages, converted to 512-byte sectors */
29121e6d690bSSong Liu reserve = (1 + pages) << (PAGE_SHIFT - 9);
29131e6d690bSSong Liu
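	/*
	 * In the critical (nearly full) log state, a stripe with no journal
	 * footprint yet (log_start == MaxSector) is parked on the no-space
	 * list; a stripe already in the journal is parked only when the log
	 * really has no room for its new blocks.
	 */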
2914a39f7afdSSong Liu if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2915a39f7afdSSong Liu sh->log_start == MaxSector)
2916a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh);
2917a39f7afdSSong Liu else if (!r5l_has_free_space(log, reserve)) {
2918a39f7afdSSong Liu if (sh->log_start == log->last_checkpoint)
2919a39f7afdSSong Liu BUG();
2920a39f7afdSSong Liu else
2921a39f7afdSSong Liu r5l_add_no_space_stripe(log, sh);
29221e6d690bSSong Liu } else {
29231e6d690bSSong Liu ret = r5l_log_stripe(log, sh, pages, 0);
29241e6d690bSSong Liu if (ret) {
29251e6d690bSSong Liu spin_lock_irq(&log->io_list_lock);
29261e6d690bSSong Liu list_add_tail(&sh->log_list, &log->no_mem_stripes);
29271e6d690bSSong Liu spin_unlock_irq(&log->io_list_lock);
29281e6d690bSSong Liu }
29291e6d690bSSong Liu }
29301e6d690bSSong Liu
29311e6d690bSSong Liu mutex_unlock(&log->io_mutex);
29321e6d690bSSong Liu return 0;
2933f6bed0efSShaohua Li }
2934f6bed0efSShaohua Li
293503b047f4SSong Liu /* check whether this big stripe is in the write-back cache. */
293603b047f4SSong Liu bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
293703b047f4SSong Liu {
293806a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
293903b047f4SSong Liu sector_t tree_index;
294003b047f4SSong Liu void *slot;
294103b047f4SSong Liu
294203b047f4SSong Liu if (!log)
294303b047f4SSong Liu return false;
294403b047f4SSong Liu
294503b047f4SSong Liu tree_index = r5c_tree_index(conf, sect);
294603b047f4SSong Liu slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
294703b047f4SSong Liu return slot != NULL;
294803b047f4SSong Liu }
294903b047f4SSong Liu
2950f6bed0efSShaohua Li static int r5l_load_log(struct r5l_log *log)
2951f6bed0efSShaohua Li {
2952f6bed0efSShaohua Li struct md_rdev *rdev = log->rdev;
2953f6bed0efSShaohua Li struct page *page;
2954f6bed0efSShaohua Li struct r5l_meta_block *mb;
2955f6bed0efSShaohua Li sector_t cp = log->rdev->journal_tail;
2956f6bed0efSShaohua Li u32 stored_crc, expected_crc;
2957f6bed0efSShaohua Li bool create_super = false;
2958d30dfeb9SJackieLiu int ret = 0;
2959f6bed0efSShaohua Li
2960f6bed0efSShaohua Li /* Make sure it's valid */
2961f6bed0efSShaohua Li if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2962f6bed0efSShaohua Li cp = 0;
2963f6bed0efSShaohua Li page = alloc_page(GFP_KERNEL);
2964f6bed0efSShaohua Li if (!page)
2965f6bed0efSShaohua Li return -ENOMEM;
2966f6bed0efSShaohua Li
29674ce4c73fSBart Van Assche if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2968f6bed0efSShaohua Li ret = -EIO;
2969f6bed0efSShaohua Li goto ioerr;
2970f6bed0efSShaohua Li }
2971f6bed0efSShaohua Li mb = page_address(page);
2972f6bed0efSShaohua Li
2973f6bed0efSShaohua Li if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2974f6bed0efSShaohua Li mb->version != R5LOG_VERSION) {
2975f6bed0efSShaohua Li create_super = true;
2976f6bed0efSShaohua Li goto create;
2977f6bed0efSShaohua Li }
2978f6bed0efSShaohua Li stored_crc = le32_to_cpu(mb->checksum);
2979f6bed0efSShaohua Li mb->checksum = 0;
29805cb2fbd6SShaohua Li expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2981f6bed0efSShaohua Li if (stored_crc != expected_crc) {
2982f6bed0efSShaohua Li create_super = true;
2983f6bed0efSShaohua Li goto create;
2984f6bed0efSShaohua Li }
2985f6bed0efSShaohua Li if (le64_to_cpu(mb->position) != cp) {
2986f6bed0efSShaohua Li create_super = true;
2987f6bed0efSShaohua Li goto create;
2988f6bed0efSShaohua Li }
2989f6bed0efSShaohua Li create:
2990f6bed0efSShaohua Li if (create_super) {
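		/*
		 * No valid meta block at the recorded tail: start a fresh
		 * log. A random initial seq makes leftover blocks from an
		 * older log vanishingly unlikely to pass the seq check
		 * during recovery.
		 */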
2991a251c17aSJason A. Donenfeld log->last_cp_seq = get_random_u32();
2992f6bed0efSShaohua Li cp = 0;
299356056c2eSZhengyuan Liu r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2994f6bed0efSShaohua Li /*
2995f6bed0efSShaohua Li * Make sure the super block points to the correct address. The log
2996f6bed0efSShaohua Li * might have data very soon. If the super block doesn't have the
2997f6bed0efSShaohua Li * correct log tail address, recovery can't find the log.
2998f6bed0efSShaohua Li */
2999f6bed0efSShaohua Li r5l_write_super(log, cp);
3000f6bed0efSShaohua Li } else
3001f6bed0efSShaohua Li log->last_cp_seq = le64_to_cpu(mb->seq);
3002f6bed0efSShaohua Li
3003f6bed0efSShaohua Li log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
30040576b1c6SShaohua Li log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
30050576b1c6SShaohua Li if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
30060576b1c6SShaohua Li log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3007f6bed0efSShaohua Li log->last_checkpoint = cp;
3008f6bed0efSShaohua Li
3009f6bed0efSShaohua Li __free_page(page);
3010f6bed0efSShaohua Li
3011d30dfeb9SJackieLiu if (create_super) {
3012d30dfeb9SJackieLiu log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3013d30dfeb9SJackieLiu log->seq = log->last_cp_seq + 1;
3014d30dfeb9SJackieLiu log->next_checkpoint = cp;
3015d30dfeb9SJackieLiu } else
30163d7e7e1dSZhengyuan Liu ret = r5l_recovery_log(log);
3017d30dfeb9SJackieLiu
30183d7e7e1dSZhengyuan Liu r5c_update_log_state(log);
30193d7e7e1dSZhengyuan Liu return ret;
3020f6bed0efSShaohua Li ioerr:
3021f6bed0efSShaohua Li __free_page(page);
3022f6bed0efSShaohua Li return ret;
3023f6bed0efSShaohua Li }
3024f6bed0efSShaohua Li
3025d5d885fdSSong Liu int r5l_start(struct r5l_log *log)
3026d5d885fdSSong Liu {
3027d5d885fdSSong Liu int ret;
3028d5d885fdSSong Liu
3029d5d885fdSSong Liu if (!log)
3030d5d885fdSSong Liu return 0;
3031d5d885fdSSong Liu
3032d5d885fdSSong Liu ret = r5l_load_log(log);
3033d5d885fdSSong Liu if (ret) {
3034d5d885fdSSong Liu struct mddev *mddev = log->rdev->mddev;
3035d5d885fdSSong Liu struct r5conf *conf = mddev->private;
3036d5d885fdSSong Liu
3037d5d885fdSSong Liu r5l_exit_log(conf);
3038d5d885fdSSong Liu }
3039d5d885fdSSong Liu return ret;
3040d5d885fdSSong Liu }
3041d5d885fdSSong Liu
304270d466f7SSong Liu void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
30432e38a37fSSong Liu {
30442e38a37fSSong Liu struct r5conf *conf = mddev->private;
304506a4d0d8SYu Kuai struct r5l_log *log = READ_ONCE(conf->log);
30462e38a37fSSong Liu
30472e38a37fSSong Liu if (!log)
30482e38a37fSSong Liu return;
30492e38a37fSSong Liu
305070d466f7SSong Liu if ((raid5_calc_degraded(conf) > 0 ||
305170d466f7SSong Liu test_bit(Journal, &rdev->flags)) &&
305206a4d0d8SYu Kuai log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
30532e38a37fSSong Liu schedule_work(&log->disable_writeback_work);
30542e38a37fSSong Liu }
30552e38a37fSSong Liu
3056f6bed0efSShaohua Li int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3057f6bed0efSShaohua Li {
3058f6bed0efSShaohua Li struct r5l_log *log;
305944693154SYu Kuai struct md_thread *thread;
3060afeee514SKent Overstreet int ret;
3061ff875738SArtur Paszkiewicz
3062913cce5aSChristoph Hellwig pr_debug("md/raid:%s: using device %pg as journal\n",
3063913cce5aSChristoph Hellwig mdname(conf->mddev), rdev->bdev);
3064f6bed0efSShaohua Li
3065f6bed0efSShaohua Li if (PAGE_SIZE != 4096)
3066f6bed0efSShaohua Li return -EINVAL;
3067c757ec95SSong Liu
3068c757ec95SSong Liu /*
3069c757ec95SSong Liu * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block plus
3070c757ec95SSong Liu * raid_disks copies of r5l_payload_data_parity.
3071c757ec95SSong Liu *
3072c757ec95SSong Liu * The write journal and cache do not work for very big arrays
3073c757ec95SSong Liu * (raid_disks > 203).
3074c757ec95SSong Liu */
3075c757ec95SSong Liu if (sizeof(struct r5l_meta_block) +
3076c757ec95SSong Liu ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3077c757ec95SSong Liu conf->raid_disks) > PAGE_SIZE) {
3078c757ec95SSong Liu pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3079c757ec95SSong Liu mdname(conf->mddev), conf->raid_disks);
3080c757ec95SSong Liu return -EINVAL;
3081c757ec95SSong Liu }
3082c757ec95SSong Liu
3083f6bed0efSShaohua Li log = kzalloc(sizeof(*log), GFP_KERNEL);
3084f6bed0efSShaohua Li if (!log)
3085f6bed0efSShaohua Li return -ENOMEM;
3086f6bed0efSShaohua Li log->rdev = rdev;
3087ad831a16SChristoph Hellwig log->need_cache_flush = bdev_write_cache(rdev->bdev);
30885cb2fbd6SShaohua Li log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3089f6bed0efSShaohua Li sizeof(rdev->mddev->uuid));
3090f6bed0efSShaohua Li
3091f6bed0efSShaohua Li mutex_init(&log->io_mutex);
3092f6bed0efSShaohua Li
3093f6bed0efSShaohua Li spin_lock_init(&log->io_list_lock);
3094f6bed0efSShaohua Li INIT_LIST_HEAD(&log->running_ios);
30950576b1c6SShaohua Li INIT_LIST_HEAD(&log->io_end_ios);
3096a8c34f91SShaohua Li INIT_LIST_HEAD(&log->flushing_ios);
309704732f74SChristoph Hellwig INIT_LIST_HEAD(&log->finished_ios);
3098f6bed0efSShaohua Li
3099f6bed0efSShaohua Li log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3100f6bed0efSShaohua Li if (!log->io_kc)
3101f6bed0efSShaohua Li goto io_kc;
3102f6bed0efSShaohua Li
3103afeee514SKent Overstreet ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3104afeee514SKent Overstreet if (ret)
31055036c390SChristoph Hellwig goto io_pool;
31065036c390SChristoph Hellwig
3107afeee514SKent Overstreet ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3108afeee514SKent Overstreet if (ret)
3109c38d29b3SChristoph Hellwig goto io_bs;
3110c38d29b3SChristoph Hellwig
3111afeee514SKent Overstreet ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3112afeee514SKent Overstreet if (ret)
3113e8deb638SChristoph Hellwig goto out_mempool;
3114e8deb638SChristoph Hellwig
311503b047f4SSong Liu spin_lock_init(&log->tree_lock);
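	/*
	 * Insertions into big_stripe_tree happen under tree_lock (a
	 * spinlock), so the tree must not sleep on allocation; a failed
	 * GFP_NOWAIT insert simply forces the stripe into write-out mode.
	 */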
311603b047f4SSong Liu INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
311703b047f4SSong Liu
311844693154SYu Kuai thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
311944693154SYu Kuai "reclaim");
312044693154SYu Kuai if (!thread)
31210576b1c6SShaohua Li goto reclaim_thread;
312244693154SYu Kuai
312344693154SYu Kuai thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
312444693154SYu Kuai rcu_assign_pointer(log->reclaim_thread, thread);
3125a39f7afdSSong Liu
31260fd22b45SShaohua Li init_waitqueue_head(&log->iounit_wait);
31270576b1c6SShaohua Li
31285036c390SChristoph Hellwig INIT_LIST_HEAD(&log->no_mem_stripes);
31295036c390SChristoph Hellwig
3130f6bed0efSShaohua Li INIT_LIST_HEAD(&log->no_space_stripes);
3131f6bed0efSShaohua Li spin_lock_init(&log->no_space_stripes_lock);
3132f6bed0efSShaohua Li
31333bddb7f8SSong Liu INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
31342e38a37fSSong Liu INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
31353bddb7f8SSong Liu
31362ded3703SSong Liu log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3137a39f7afdSSong Liu INIT_LIST_HEAD(&log->stripe_in_journal_list);
3138a39f7afdSSong Liu spin_lock_init(&log->stripe_in_journal_lock);
3139a39f7afdSSong Liu atomic_set(&log->stripe_in_journal_count, 0);
31402ded3703SSong Liu
314106a4d0d8SYu Kuai WRITE_ONCE(conf->log, log);
3142d2250f10SSong Liu
3143a62ab49eSShaohua Li set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3144f6bed0efSShaohua Li return 0;
3145e8deb638SChristoph Hellwig
31460576b1c6SShaohua Li reclaim_thread:
3147afeee514SKent Overstreet mempool_exit(&log->meta_pool);
3148e8deb638SChristoph Hellwig out_mempool:
3149afeee514SKent Overstreet bioset_exit(&log->bs);
3150c38d29b3SChristoph Hellwig io_bs:
3151afeee514SKent Overstreet mempool_exit(&log->io_pool);
31525036c390SChristoph Hellwig io_pool:
3153f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc);
3154f6bed0efSShaohua Li io_kc:
3155f6bed0efSShaohua Li kfree(log);
3156f6bed0efSShaohua Li return -EINVAL;
3157f6bed0efSShaohua Li }
3158f6bed0efSShaohua Li
3159ff875738SArtur Paszkiewicz void r5l_exit_log(struct r5conf *conf)
3160f6bed0efSShaohua Li {
3161ff875738SArtur Paszkiewicz struct r5l_log *log = conf->log;
3162ff875738SArtur Paszkiewicz
31637eb8ff02SLi Lingfeng md_unregister_thread(conf->mddev, &log->reclaim_thread);
3164b13015afSLogan Gunthorpe
3165a705b11bSYu Kuai /*
3166a705b11bSYu Kuai * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
3167a705b11bSYu Kuai * ensure disable_writeback_work wakes up and exits.
3168a705b11bSYu Kuai */
316906a4d0d8SYu Kuai WRITE_ONCE(conf->log, NULL);
3170a705b11bSYu Kuai wake_up(&conf->mddev->sb_wait);
3171a705b11bSYu Kuai flush_work(&log->disable_writeback_work);
3172b13015afSLogan Gunthorpe
3173afeee514SKent Overstreet mempool_exit(&log->meta_pool);
3174afeee514SKent Overstreet bioset_exit(&log->bs);
3175afeee514SKent Overstreet mempool_exit(&log->io_pool);
3176f6bed0efSShaohua Li kmem_cache_destroy(log->io_kc);
3177f6bed0efSShaohua Li kfree(log);
3178f6bed0efSShaohua Li }
3179