xref: /linux/drivers/md/raid5-cache.c (revision 26bb0d3f38a764b743a3ad5c8b6e5b5044d7ceb4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4  * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
5  */
6 #include <linux/kernel.h>
7 #include <linux/wait.h>
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/raid/md_p.h>
11 #include <linux/crc32c.h>
12 #include <linux/random.h>
13 #include <linux/kthread.h>
14 #include <linux/types.h>
15 #include "md.h"
16 #include "raid5.h"
17 #include "md-bitmap.h"
18 #include "raid5-log.h"
19 
20 /*
21  * metadata/data are stored on disk in 4k units (blocks), regardless of
22  * the underlying hardware sector size. Only works with PAGE_SIZE == 4096.
23  */
24 #define BLOCK_SECTORS (8)
25 #define BLOCK_SECTOR_SHIFT (3)
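/*
 * Worked example of the unit arithmetic above (illustrative only, assuming
 * the usual 512-byte hardware sector): one 4k block is 4096 / 512 = 8
 * sectors, so BLOCK_SECTORS == 8 and BLOCK_SECTOR_SHIFT == ilog2(8) == 3,
 * i.e. sectors = blocks << BLOCK_SECTOR_SHIFT and blocks = sectors >> 3.
 */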
26 
27 /*
28  * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
29  *
30  * In write-through mode, reclaim runs each time log->max_free_space of
31  * log space has been used. This keeps the recovery scan from taking too long.
32  */
33 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
34 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
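/*
 * Minimal sketch (not actual code from this file) of how the two macros
 * above are meant to combine into log->max_free_space, as described in the
 * comment; the initialization that actually does this lives elsewhere in
 * the driver:
 *
 *	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
 *	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
 *		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
 *
 * i.e. max_free_space = min(device_size / 4, 10GiB worth of sectors).
 */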
35 
36 /* wake up reclaim thread periodically */
37 #define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
38 /* start flushing when this many full stripes are cached */
39 #define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
40 /* reclaim stripes in groups */
41 #define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
42 
43 /*
44  * We only need 2 bios per I/O unit to make progress, but ensure we
45  * have a few more available to not get too tight.
46  */
47 #define R5L_POOL_SIZE	4
48 
49 static char *r5c_journal_mode_str[] = {"write-through",
50 				       "write-back"};
51 /*
52  * raid5 cache state machine
53  *
54  * With the RAID cache, each stripe works in two phases:
55  *	- caching phase
56  *	- writing-out phase
57  *
58  * These two phases are controlled by bit STRIPE_R5C_CACHING:
59  *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
60  *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
61  *
62  * When there is no journal, or the journal is in write-through mode,
63  * the stripe is always in writing-out phase.
64  *
65  * For write-back journal, the stripe is sent to caching phase on write
66  * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
67  * the write-out phase by clearing STRIPE_R5C_CACHING.
68  *
69  * Stripes in caching phase do not write the raid disks. Instead, all
70  * writes are committed from the log device. Therefore, a stripe in
71  * caching phase handles writes as:
72  *	- write to log device
73  *	- return IO
74  *
75  * Stripes in writing-out phase handle writes as:
76  *	- calculate parity
77  *	- write pending data and parity to journal
78  *	- write data and parity to raid disks
79  *	- return IO for pending writes
80  */
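/*
 * Illustrative sketch of the phase check implied by the state machine above
 * (hypothetical helper, not used anywhere in this driver): a stripe is in
 * the caching phase iff STRIPE_R5C_CACHING is set, otherwise it is in the
 * writing-out phase.
 *
 *	static inline bool r5c_stripe_in_caching_phase(struct stripe_head *sh)
 *	{
 *		return test_bit(STRIPE_R5C_CACHING, &sh->state);
 *	}
 */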
81 
82 struct r5l_log {
83 	struct md_rdev *rdev;
84 
85 	u32 uuid_checksum;
86 
87 	sector_t device_size;		/* log device size, rounded to
88 					 * BLOCK_SECTORS */
89 	sector_t max_free_space;	/* reclaim runs if free space is at
90 					 * this size */
91 
92 	sector_t last_checkpoint;	/* log tail. where recovery scan
93 					 * starts from */
94 	u64 last_cp_seq;		/* log tail sequence */
95 
96 	sector_t log_start;		/* log head. where new data appends */
97 	u64 seq;			/* log head sequence */
98 
99 	sector_t next_checkpoint;
100 
101 	struct mutex io_mutex;
102 	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */
103 
104 	spinlock_t io_list_lock;
105 	struct list_head running_ios;	/* io_units which are still running,
106 					 * and have not yet been completely
107 					 * written to the log */
108 	struct list_head io_end_ios;	/* io_units which have been completely
109 					 * written to the log but not yet written
110 					 * to the RAID */
111 	struct list_head flushing_ios;	/* io_units which are waiting for log
112 					 * cache flush */
113 	struct list_head finished_ios;	/* io_units which settle down in log disk */
114 	struct bio flush_bio;
115 
116 	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */
117 
118 	struct kmem_cache *io_kc;
119 	mempool_t io_pool;
120 	struct bio_set bs;
121 	mempool_t meta_pool;
122 
123 	struct md_thread __rcu *reclaim_thread;
124 	unsigned long reclaim_target;	/* amount of space that needs to be
125 					 * reclaimed. If it's 0, reclaim the space
126 					 * used by io_units which are in
127 					 * IO_UNIT_STRIPE_END state (i.e. reclaim
128 					 * doesn't wait for a specific io_unit
129 					 * to switch to IO_UNIT_STRIPE_END
130 					 * state) */
131 	wait_queue_head_t iounit_wait;
132 
133 	struct list_head no_space_stripes; /* pending stripes, log has no space */
134 	spinlock_t no_space_stripes_lock;
135 
136 	bool need_cache_flush;
137 
138 	/* for r5c_cache */
139 	enum r5c_journal_mode r5c_journal_mode;
140 
141 	/* all stripes in r5cache, in the order of seq at sh->log_start */
142 	struct list_head stripe_in_journal_list;
143 
144 	spinlock_t stripe_in_journal_lock;
145 	atomic_t stripe_in_journal_count;
146 
147 	/* to submit async io_units, to fulfill ordering of flush */
148 	struct work_struct deferred_io_work;
149 	/* to disable write back cache when the array is degraded */
150 	struct work_struct disable_writeback_work;
151 
152 	/* for chunk_aligned_read in writeback mode, details below */
153 	spinlock_t tree_lock;
154 	struct radix_tree_root big_stripe_tree;
155 };
156 
157 /*
158  * Enable chunk_aligned_read() with write back cache.
159  *
160  * Each chunk may contain more than one stripe (for example, a 256kB
161  * chunk contains 64 4kB pages, so the chunk contains 64 stripes). For
162  * chunk_aligned_read, these stripes are grouped into one "big_stripe".
163  * For each big_stripe, we count how many stripes of this big_stripe
164  * are in the write back cache. These counts are tracked in a radix tree
165  * (big_stripe_tree), using the radix_tree item pointer as the counter.
166  * r5c_tree_index() is used to calculate keys for the radix tree.
167  *
168  * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
169  * the big_stripe of each chunk in the tree. If this big_stripe is in the
170  * tree, chunk_aligned_read() aborts. This lookup is protected by
171  * rcu_read_lock().
172  *
173  * It is necessary to remember whether a stripe is counted in
174  * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
175  * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
176  * two flags is set, the stripe is counted in big_stripe_tree. This
177  * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
178  * r5c_try_caching_write(); and moving clear_bit of
179  * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
180  * r5c_finish_stripe_write_out().
181  */
182 
183 /*
184  * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00,
185  * so it is necessary to left shift the counter by 2 bits before using it
186  * as the data pointer of the tree.
187  */
188 #define R5C_RADIX_COUNT_SHIFT 2
189 
190 /*
191  * calculate key for big_stripe_tree
192  *
193  * sect: align_bi->bi_iter.bi_sector or sh->sector
194  */
195 static inline sector_t r5c_tree_index(struct r5conf *conf,
196 				      sector_t sect)
197 {
198 	sector_div(sect, conf->chunk_sectors);
199 	return sect;
200 }
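/*
 * Illustrative sketch (hypothetical helpers, not part of this driver) of the
 * counter encoding described above: the per-big_stripe count is shifted left
 * by R5C_RADIX_COUNT_SHIFT so the lowest two bits of the radix_tree item
 * pointer stay zero, and shifted back when the count is read:
 *
 *	static inline void *r5c_count_to_item(unsigned long count)
 *	{
 *		return (void *)(count << R5C_RADIX_COUNT_SHIFT);
 *	}
 *
 *	static inline unsigned long r5c_item_to_count(void *item)
 *	{
 *		return (unsigned long)item >> R5C_RADIX_COUNT_SHIFT;
 *	}
 *
 * chunk_aligned_read() then only needs to know whether
 * radix_tree_lookup(&log->big_stripe_tree, r5c_tree_index(conf, sect))
 * returns non-NULL to decide whether to abort.
 */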
201 
202 /*
203  * An IO range starts at a metadata block and ends at the next metadata
204  * block. The io_unit's metadata block tracks the data/parity that follows it.
205  * The io_unit is written to the log disk with normal writes; since we always
206  * flush the log disk first and only then start moving data to the raid disks,
207  * there is no requirement to write the io_unit with FLUSH/FUA.
208  */
209 struct r5l_io_unit {
210 	struct r5l_log *log;
211 
212 	struct page *meta_page;	/* store meta block */
213 	int meta_offset;	/* current offset in meta_page */
214 
215 	struct bio *current_bio;/* current_bio accepting new data */
216 
217 	atomic_t pending_stripe;/* how many stripes not flushed to raid */
218 	u64 seq;		/* seq number of the metablock */
219 	sector_t log_start;	/* where the io_unit starts */
220 	sector_t log_end;	/* where the io_unit ends */
221 	struct list_head log_sibling; /* log->running_ios */
222 	struct list_head stripe_list; /* stripes added to the io_unit */
223 
224 	int state;
225 	bool need_split_bio;
226 	struct bio *split_bio;
227 
228 	unsigned int has_flush:1;		/* include flush request */
229 	unsigned int has_fua:1;			/* include fua request */
230 	unsigned int has_null_flush:1;		/* include null flush request */
231 	unsigned int has_flush_payload:1;	/* include flush payload  */
232 	/*
233 	 * io isn't sent yet; a flush/fua request can only be submitted once it
234 	 * is the first IO in the running_ios list
235 	 */
236 	unsigned int io_deferred:1;
237 
238 	struct bio_list flush_barriers;   /* size == 0 flush bios */
239 };
240 
241 /* r5l_io_unit state */
242 enum r5l_io_unit_state {
243 	IO_UNIT_RUNNING = 0,	/* accepting new IO */
244 	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to the log,
245 				 * not accepting new bio */
246 	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to the log */
247 	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
248 };
249 
250 bool r5c_is_writeback(struct r5l_log *log)
251 {
252 	return (log != NULL &&
253 		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
254 }
255 
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
257 {
258 	start += inc;
259 	if (start >= log->device_size)
260 		start = start - log->device_size;
261 	return start;
262 }
263 
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
265 				  sector_t end)
266 {
267 	if (end >= start)
268 		return end - start;
269 	else
270 		return end + log->device_size - start;
271 }
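/*
 * Worked example of the ring arithmetic above (made-up numbers, illustrative
 * only): with device_size = 1000 sectors, appending one 8-sector block at
 * log_start = 996 gives r5l_ring_add(log, 996, 8) == 4 (wrapped around), and
 * r5l_ring_distance(log, 996, 4) == 4 + 1000 - 996 == 8, exactly the space
 * consumed by the append.
 */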
272 
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
274 {
275 	sector_t used_size;
276 
277 	used_size = r5l_ring_distance(log, log->last_checkpoint,
278 					log->log_start);
279 
280 	return log->device_size > used_size + size;
281 }
282 
283 static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
284 				    enum r5l_io_unit_state state)
285 {
286 	if (WARN_ON(io->state >= state))
287 		return;
288 	io->state = state;
289 }
290 
291 static void
292 r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
293 {
294 	struct bio *wbi, *wbi2;
295 
296 	wbi = dev->written;
297 	dev->written = NULL;
298 	while (wbi && wbi->bi_iter.bi_sector <
299 	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
300 		wbi2 = r5_next_bio(conf, wbi, dev->sector);
301 		md_write_end(conf->mddev);
302 		bio_endio(wbi);
303 		wbi = wbi2;
304 	}
305 }
306 
307 void r5c_handle_cached_data_endio(struct r5conf *conf,
308 				  struct stripe_head *sh, int disks)
309 {
310 	int i;
311 
312 	for (i = sh->disks; i--; ) {
313 		if (sh->dev[i].written) {
314 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
315 			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
316 			conf->mddev->bitmap_ops->endwrite(conf->mddev,
317 					sh->sector, RAID5_STRIPE_SECTORS(conf),
318 					!test_bit(STRIPE_DEGRADED, &sh->state),
319 					false);
320 		}
321 	}
322 }
323 
324 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
325 
326 /* Check whether we should flush some stripes to free up stripe cache */
327 void r5c_check_stripe_cache_usage(struct r5conf *conf)
328 {
329 	int total_cached;
330 	struct r5l_log *log = READ_ONCE(conf->log);
331 
332 	if (!r5c_is_writeback(log))
333 		return;
334 
335 	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
336 		atomic_read(&conf->r5c_cached_full_stripes);
337 
338 	/*
339 	 * The following condition is true for either of the following:
340 	 *   - stripe cache pressure high:
341 	 *          total_cached > 3/4 min_nr_stripes ||
342 	 *          empty_inactive_list_nr > 0
343 	 *   - stripe cache pressure moderate:
344 	 *          total_cached > 1/2 min_nr_stripes
345 	 */
346 	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
347 	    atomic_read(&conf->empty_inactive_list_nr) > 0)
348 		r5l_wake_reclaim(log, 0);
349 }
350 
351 /*
352  * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
353  * stripes in the cache
354  */
355 void r5c_check_cached_full_stripe(struct r5conf *conf)
356 {
357 	struct r5l_log *log = READ_ONCE(conf->log);
358 
359 	if (!r5c_is_writeback(log))
360 		return;
361 
362 	/*
363 	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
364 	 * or a full stripe (chunk size / 4k stripes).
365 	 */
366 	if (atomic_read(&conf->r5c_cached_full_stripes) >=
367 	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
368 		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
369 		r5l_wake_reclaim(log, 0);
370 }
371 
372 /*
373  * Total log space (in sectors) needed to flush all data in cache
374  *
375  * To avoid deadlock due to log space, it is necessary to reserve log
376  * space to flush critical stripes (stripes that occupying log space near
377  * last_checkpoint). This function helps check how much log space is
378  * required to flush all cached stripes.
379  *
380  * To reduce log space requirements, two mechanisms are used to give cache
381  * flush higher priorities:
382  *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
383  *       stripes ALREADY in journal can be flushed w/o pending writes;
384  *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
385  *       can be delayed (r5l_add_no_space_stripe).
386  *
387  * In cache flush, the stripe goes through 1 and then 2. For a stripe that
388  * already passed 1, flushing it requires at most (conf->max_degraded + 1)
389  * pages of journal space. For a stripe that has not passed 1, flushing it
390  * requires (conf->raid_disks + 1) pages of journal space. There are at
391  * most (conf->group_cnt + 1) stripes that have passed 1. So total journal space
392  * required to flush all cached stripes (in pages) is:
393  *
394  *     (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
395  *     (group_cnt + 1) * (raid_disks + 1)
396  * or
397  *     (stripe_in_journal_count) * (max_degraded + 1) +
398  *     (group_cnt + 1) * (raid_disks - max_degraded)
399  */
400 static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
401 {
402 	struct r5l_log *log = READ_ONCE(conf->log);
403 
404 	if (!r5c_is_writeback(log))
405 		return 0;
406 
407 	return BLOCK_SECTORS *
408 		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
409 		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
410 }
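/*
 * Worked example of the bound computed above (made-up numbers, illustrative
 * only): for a 6-disk RAID6 (raid_disks = 6, max_degraded = 2) with
 * group_cnt = 1 and stripe_in_journal_count = 100, the function returns
 *
 *	BLOCK_SECTORS * ((2 + 1) * 100 + (6 - 2) * (1 + 1))
 *		= 8 * (300 + 8) = 2464 sectors,
 *
 * i.e. roughly 1.2MiB of journal space must stay reserved so the cache can
 * always be flushed.
 */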
411 
412 /*
413  * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
414  *
415  * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
416  * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
417  * device is less than 2x of reclaim_required_space.
418  */
419 static inline void r5c_update_log_state(struct r5l_log *log)
420 {
421 	struct r5conf *conf = log->rdev->mddev->private;
422 	sector_t free_space;
423 	sector_t reclaim_space;
424 	bool wake_reclaim = false;
425 
426 	if (!r5c_is_writeback(log))
427 		return;
428 
429 	free_space = r5l_ring_distance(log, log->log_start,
430 				       log->last_checkpoint);
431 	reclaim_space = r5c_log_required_to_flush_cache(conf);
432 	if (free_space < 2 * reclaim_space)
433 		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
434 	else {
435 		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
436 			wake_reclaim = true;
437 		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
438 	}
439 	if (free_space < 3 * reclaim_space)
440 		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
441 	else
442 		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
443 
444 	if (wake_reclaim)
445 		r5l_wake_reclaim(log, 0);
446 }
447 
448 /*
449  * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
450  * This function should only be called in write-back mode.
451  */
452 void r5c_make_stripe_write_out(struct stripe_head *sh)
453 {
454 	struct r5conf *conf = sh->raid_conf;
455 	struct r5l_log *log = READ_ONCE(conf->log);
456 
457 	BUG_ON(!r5c_is_writeback(log));
458 
459 	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
460 	clear_bit(STRIPE_R5C_CACHING, &sh->state);
461 
462 	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
463 		atomic_inc(&conf->preread_active_stripes);
464 }
465 
466 static void r5c_handle_data_cached(struct stripe_head *sh)
467 {
468 	int i;
469 
470 	for (i = sh->disks; i--; )
471 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
472 			set_bit(R5_InJournal, &sh->dev[i].flags);
473 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
474 		}
475 	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
476 }
477 
478 /*
479  * this journal write must contain full parity,
480  * it may also contain some data pages
481  */
482 static void r5c_handle_parity_cached(struct stripe_head *sh)
483 {
484 	int i;
485 
486 	for (i = sh->disks; i--; )
487 		if (test_bit(R5_InJournal, &sh->dev[i].flags))
488 			set_bit(R5_Wantwrite, &sh->dev[i].flags);
489 }
490 
491 /*
492  * Setting proper flags after writing (or flushing) data and/or parity to the
493  * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
494  */
495 static void r5c_finish_cache_stripe(struct stripe_head *sh)
496 {
497 	struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
498 
499 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
500 		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
501 		/*
502 		 * Set R5_InJournal for parity dev[pd_idx]. This means
503 		 * all data AND parity in the journal. For RAID 6, it is
504 		 * NOT necessary to set the flag for dev[qd_idx], as the
505 		 * two parities are written out together.
506 		 */
507 		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
508 	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
509 		r5c_handle_data_cached(sh);
510 	} else {
511 		r5c_handle_parity_cached(sh);
512 		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
513 	}
514 }
515 
516 static void r5l_io_run_stripes(struct r5l_io_unit *io)
517 {
518 	struct stripe_head *sh, *next;
519 
520 	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
521 		list_del_init(&sh->log_list);
522 
523 		r5c_finish_cache_stripe(sh);
524 
525 		set_bit(STRIPE_HANDLE, &sh->state);
526 		raid5_release_stripe(sh);
527 	}
528 }
529 
530 static void r5l_log_run_stripes(struct r5l_log *log)
531 {
532 	struct r5l_io_unit *io, *next;
533 
534 	lockdep_assert_held(&log->io_list_lock);
535 
536 	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
537 		/* don't change list order */
538 		if (io->state < IO_UNIT_IO_END)
539 			break;
540 
541 		list_move_tail(&io->log_sibling, &log->finished_ios);
542 		r5l_io_run_stripes(io);
543 	}
544 }
545 
546 static void r5l_move_to_end_ios(struct r5l_log *log)
547 {
548 	struct r5l_io_unit *io, *next;
549 
550 	lockdep_assert_held(&log->io_list_lock);
551 
552 	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
553 		/* don't change list order */
554 		if (io->state < IO_UNIT_IO_END)
555 			break;
556 		list_move_tail(&io->log_sibling, &log->io_end_ios);
557 	}
558 }
559 
560 static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
561 static void r5l_log_endio(struct bio *bio)
562 {
563 	struct r5l_io_unit *io = bio->bi_private;
564 	struct r5l_io_unit *io_deferred;
565 	struct r5l_log *log = io->log;
566 	unsigned long flags;
567 	bool has_null_flush;
568 	bool has_flush_payload;
569 
570 	if (bio->bi_status)
571 		md_error(log->rdev->mddev, log->rdev);
572 
573 	bio_put(bio);
574 	mempool_free(io->meta_page, &log->meta_pool);
575 
576 	spin_lock_irqsave(&log->io_list_lock, flags);
577 	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
578 
579 	/*
580 	 * if the io doesn't have null_flush or flush payload,
581 	 * it is not safe to access it after releasing io_list_lock.
582 	 * Therefore, it is necessary to check the condition with
583 	 * the lock held.
584 	 */
585 	has_null_flush = io->has_null_flush;
586 	has_flush_payload = io->has_flush_payload;
587 
588 	if (log->need_cache_flush && !list_empty(&io->stripe_list))
589 		r5l_move_to_end_ios(log);
590 	else
591 		r5l_log_run_stripes(log);
592 	if (!list_empty(&log->running_ios)) {
593 		/*
594 		 * FLUSH/FUA io_unit is deferred because of ordering, now we
595 		 * can dispatch it
596 		 */
597 		io_deferred = list_first_entry(&log->running_ios,
598 					       struct r5l_io_unit, log_sibling);
599 		if (io_deferred->io_deferred)
600 			schedule_work(&log->deferred_io_work);
601 	}
602 
603 	spin_unlock_irqrestore(&log->io_list_lock, flags);
604 
605 	if (log->need_cache_flush)
606 		md_wakeup_thread(log->rdev->mddev->thread);
607 
608 	/* finish flush-only io_units and PAYLOAD_FLUSH-only io_units */
609 	if (has_null_flush) {
610 		struct bio *bi;
611 
612 		WARN_ON(bio_list_empty(&io->flush_barriers));
613 		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
614 			bio_endio(bi);
615 			if (atomic_dec_and_test(&io->pending_stripe)) {
616 				__r5l_stripe_write_finished(io);
617 				return;
618 			}
619 		}
620 	}
621 	/* decrease pending_stripe for flush payload */
622 	if (has_flush_payload)
623 		if (atomic_dec_and_test(&io->pending_stripe))
624 			__r5l_stripe_write_finished(io);
625 }
626 
627 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
628 {
629 	unsigned long flags;
630 
631 	spin_lock_irqsave(&log->io_list_lock, flags);
632 	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
633 	spin_unlock_irqrestore(&log->io_list_lock, flags);
634 
635 	/*
636 	 * In case of journal device failures, submit_bio will get an error
637 	 * and call endio, and active stripes will then continue the write
638 	 * process. Therefore, it is not necessary to check the Faulty bit
639 	 * of journal device here.
640 	 *
641 	 * We can't check split_bio after current_bio is submitted. If
642 	 * io->split_bio is null, after current_bio is submitted, current_bio
643 	 * might already be completed and the io_unit is freed. We submit
644 	 * split_bio first to avoid the issue.
645 	 */
646 	if (io->split_bio) {
647 		if (io->has_flush)
648 			io->split_bio->bi_opf |= REQ_PREFLUSH;
649 		if (io->has_fua)
650 			io->split_bio->bi_opf |= REQ_FUA;
651 		submit_bio(io->split_bio);
652 	}
653 
654 	if (io->has_flush)
655 		io->current_bio->bi_opf |= REQ_PREFLUSH;
656 	if (io->has_fua)
657 		io->current_bio->bi_opf |= REQ_FUA;
658 	submit_bio(io->current_bio);
659 }
660 
661 /* deferred io_unit will be dispatched here */
662 static void r5l_submit_io_async(struct work_struct *work)
663 {
664 	struct r5l_log *log = container_of(work, struct r5l_log,
665 					   deferred_io_work);
666 	struct r5l_io_unit *io = NULL;
667 	unsigned long flags;
668 
669 	spin_lock_irqsave(&log->io_list_lock, flags);
670 	if (!list_empty(&log->running_ios)) {
671 		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
672 				      log_sibling);
673 		if (!io->io_deferred)
674 			io = NULL;
675 		else
676 			io->io_deferred = 0;
677 	}
678 	spin_unlock_irqrestore(&log->io_list_lock, flags);
679 	if (io)
680 		r5l_do_submit_io(log, io);
681 }
682 
683 static void r5c_disable_writeback_async(struct work_struct *work)
684 {
685 	struct r5l_log *log = container_of(work, struct r5l_log,
686 					   disable_writeback_work);
687 	struct mddev *mddev = log->rdev->mddev;
688 	struct r5conf *conf = mddev->private;
689 
690 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
691 		return;
692 	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
693 		mdname(mddev));
694 
695 	/* wait for the superblock change before suspending */
696 	wait_event(mddev->sb_wait,
697 		   !READ_ONCE(conf->log) ||
698 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
699 
700 	log = READ_ONCE(conf->log);
701 	if (log) {
702 		mddev_suspend(mddev, false);
703 		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
704 		mddev_resume(mddev);
705 	}
706 }
707 
708 static void r5l_submit_current_io(struct r5l_log *log)
709 {
710 	struct r5l_io_unit *io = log->current_io;
711 	struct r5l_meta_block *block;
712 	unsigned long flags;
713 	u32 crc;
714 	bool do_submit = true;
715 
716 	if (!io)
717 		return;
718 
719 	block = page_address(io->meta_page);
720 	block->meta_size = cpu_to_le32(io->meta_offset);
721 	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
722 	block->checksum = cpu_to_le32(crc);
723 
724 	log->current_io = NULL;
725 	spin_lock_irqsave(&log->io_list_lock, flags);
726 	if (io->has_flush || io->has_fua) {
727 		if (io != list_first_entry(&log->running_ios,
728 					   struct r5l_io_unit, log_sibling)) {
729 			io->io_deferred = 1;
730 			do_submit = false;
731 		}
732 	}
733 	spin_unlock_irqrestore(&log->io_list_lock, flags);
734 	if (do_submit)
735 		r5l_do_submit_io(log, io);
736 }
737 
738 static struct bio *r5l_bio_alloc(struct r5l_log *log)
739 {
740 	struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
741 					   REQ_OP_WRITE, GFP_NOIO, &log->bs);
742 
743 	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
744 
745 	return bio;
746 }
747 
748 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
749 {
750 	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
751 
752 	r5c_update_log_state(log);
753 	/*
754 	 * If we filled up the log device, start from the beginning again,
755 	 * which will require a new bio.
756 	 *
757 	 * Note: for this to work properly the log size needs to be a multiple
758 	 * of BLOCK_SECTORS.
759 	 */
760 	if (log->log_start == 0)
761 		io->need_split_bio = true;
762 
763 	io->log_end = log->log_start;
764 }
765 
766 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
767 {
768 	struct r5l_io_unit *io;
769 	struct r5l_meta_block *block;
770 
771 	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
772 	if (!io)
773 		return NULL;
774 	memset(io, 0, sizeof(*io));
775 
776 	io->log = log;
777 	INIT_LIST_HEAD(&io->log_sibling);
778 	INIT_LIST_HEAD(&io->stripe_list);
779 	bio_list_init(&io->flush_barriers);
780 	io->state = IO_UNIT_RUNNING;
781 
782 	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
783 	block = page_address(io->meta_page);
784 	clear_page(block);
785 	block->magic = cpu_to_le32(R5LOG_MAGIC);
786 	block->version = R5LOG_VERSION;
787 	block->seq = cpu_to_le64(log->seq);
788 	block->position = cpu_to_le64(log->log_start);
789 
790 	io->log_start = log->log_start;
791 	io->meta_offset = sizeof(struct r5l_meta_block);
792 	io->seq = log->seq++;
793 
794 	io->current_bio = r5l_bio_alloc(log);
795 	io->current_bio->bi_end_io = r5l_log_endio;
796 	io->current_bio->bi_private = io;
797 	__bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
798 
799 	r5_reserve_log_entry(log, io);
800 
801 	spin_lock_irq(&log->io_list_lock);
802 	list_add_tail(&io->log_sibling, &log->running_ios);
803 	spin_unlock_irq(&log->io_list_lock);
804 
805 	return io;
806 }
807 
808 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
809 {
810 	if (log->current_io &&
811 	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
812 		r5l_submit_current_io(log);
813 
814 	if (!log->current_io) {
815 		log->current_io = r5l_new_meta(log);
816 		if (!log->current_io)
817 			return -ENOMEM;
818 	}
819 
820 	return 0;
821 }
822 
823 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
824 				    sector_t location,
825 				    u32 checksum1, u32 checksum2,
826 				    bool checksum2_valid)
827 {
828 	struct r5l_io_unit *io = log->current_io;
829 	struct r5l_payload_data_parity *payload;
830 
831 	payload = page_address(io->meta_page) + io->meta_offset;
832 	payload->header.type = cpu_to_le16(type);
833 	payload->header.flags = cpu_to_le16(0);
834 	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
835 				    (PAGE_SHIFT - 9));
836 	payload->location = cpu_to_le64(location);
837 	payload->checksum[0] = cpu_to_le32(checksum1);
838 	if (checksum2_valid)
839 		payload->checksum[1] = cpu_to_le32(checksum2);
840 
841 	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
842 		sizeof(__le32) * (1 + !!checksum2_valid);
843 }
844 
845 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
846 {
847 	struct r5l_io_unit *io = log->current_io;
848 
849 	if (io->need_split_bio) {
850 		BUG_ON(io->split_bio);
851 		io->split_bio = io->current_bio;
852 		io->current_bio = r5l_bio_alloc(log);
853 		bio_chain(io->current_bio, io->split_bio);
854 		io->need_split_bio = false;
855 	}
856 
857 	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
858 		BUG();
859 
860 	r5_reserve_log_entry(log, io);
861 }
862 
863 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
864 {
865 	struct mddev *mddev = log->rdev->mddev;
866 	struct r5conf *conf = mddev->private;
867 	struct r5l_io_unit *io;
868 	struct r5l_payload_flush *payload;
869 	int meta_size;
870 
871 	/*
872 	 * payload_flush requires extra writes to the journal.
873 	 * To avoid handling the extra IO in quiesce, just skip
874 	 * flush_payload
875 	 */
876 	if (conf->quiesce)
877 		return;
878 
879 	mutex_lock(&log->io_mutex);
880 	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
881 
882 	if (r5l_get_meta(log, meta_size)) {
883 		mutex_unlock(&log->io_mutex);
884 		return;
885 	}
886 
887 	/* current implementation is one stripe per flush payload */
888 	io = log->current_io;
889 	payload = page_address(io->meta_page) + io->meta_offset;
890 	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
891 	payload->header.flags = cpu_to_le16(0);
892 	payload->size = cpu_to_le32(sizeof(__le64));
893 	payload->flush_stripes[0] = cpu_to_le64(sect);
894 	io->meta_offset += meta_size;
895 	/* multiple flush payloads count as one pending_stripe */
896 	if (!io->has_flush_payload) {
897 		io->has_flush_payload = 1;
898 		atomic_inc(&io->pending_stripe);
899 	}
900 	mutex_unlock(&log->io_mutex);
901 }
902 
903 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
904 			   int data_pages, int parity_pages)
905 {
906 	int i;
907 	int meta_size;
908 	int ret;
909 	struct r5l_io_unit *io;
910 
911 	meta_size =
912 		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
913 		 * data_pages) +
914 		sizeof(struct r5l_payload_data_parity) +
915 		sizeof(__le32) * parity_pages;
916 
917 	ret = r5l_get_meta(log, meta_size);
918 	if (ret)
919 		return ret;
920 
921 	io = log->current_io;
922 
923 	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
924 		io->has_flush = 1;
925 
926 	for (i = 0; i < sh->disks; i++) {
927 		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
928 		    test_bit(R5_InJournal, &sh->dev[i].flags))
929 			continue;
930 		if (i == sh->pd_idx || i == sh->qd_idx)
931 			continue;
932 		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
933 		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
934 			io->has_fua = 1;
935 			/*
936 			 * we need to flush journal to make sure recovery can
937 			 * reach the data with fua flag
938 			 */
939 			io->has_flush = 1;
940 		}
941 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
942 					raid5_compute_blocknr(sh, i, 0),
943 					sh->dev[i].log_checksum, 0, false);
944 		r5l_append_payload_page(log, sh->dev[i].page);
945 	}
946 
947 	if (parity_pages == 2) {
948 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
949 					sh->sector, sh->dev[sh->pd_idx].log_checksum,
950 					sh->dev[sh->qd_idx].log_checksum, true);
951 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
952 		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
953 	} else if (parity_pages == 1) {
954 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
955 					sh->sector, sh->dev[sh->pd_idx].log_checksum,
956 					0, false);
957 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
958 	} else  /* Just writing data, not parity, in caching phase */
959 		BUG_ON(parity_pages != 0);
960 
961 	list_add_tail(&sh->log_list, &io->stripe_list);
962 	atomic_inc(&io->pending_stripe);
963 	sh->log_io = io;
964 
965 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
966 		return 0;
967 
968 	if (sh->log_start == MaxSector) {
969 		BUG_ON(!list_empty(&sh->r5c));
970 		sh->log_start = io->log_start;
971 		spin_lock_irq(&log->stripe_in_journal_lock);
972 		list_add_tail(&sh->r5c,
973 			      &log->stripe_in_journal_list);
974 		spin_unlock_irq(&log->stripe_in_journal_lock);
975 		atomic_inc(&log->stripe_in_journal_count);
976 	}
977 	return 0;
978 }
979 
980 /* add stripe to no_space_stripes, and then wake up reclaim */
981 static inline void r5l_add_no_space_stripe(struct r5l_log *log,
982 					   struct stripe_head *sh)
983 {
984 	spin_lock(&log->no_space_stripes_lock);
985 	list_add_tail(&sh->log_list, &log->no_space_stripes);
986 	spin_unlock(&log->no_space_stripes_lock);
987 }
988 
989 /*
990  * running in raid5d, where reclaim could wait for raid5d too (when it flushes
991  * data from log to raid disks), so we shouldn't wait for reclaim here
992  */
993 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
994 {
995 	struct r5conf *conf = sh->raid_conf;
996 	int write_disks = 0;
997 	int data_pages, parity_pages;
998 	int reserve;
999 	int i;
1000 	int ret = 0;
1001 	bool wake_reclaim = false;
1002 
1003 	if (!log)
1004 		return -EAGAIN;
1005 	/* Don't support stripe batch */
1006 	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
1007 	    test_bit(STRIPE_SYNCING, &sh->state)) {
1008 		/* the stripe is written to log, we start writing it to raid */
1009 		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
1010 		return -EAGAIN;
1011 	}
1012 
1013 	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
1014 
1015 	for (i = 0; i < sh->disks; i++) {
1016 		void *addr;
1017 
1018 		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
1019 		    test_bit(R5_InJournal, &sh->dev[i].flags))
1020 			continue;
1021 
1022 		write_disks++;
1023 		/* checksum is already calculated in last run */
1024 		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
1025 			continue;
1026 		addr = kmap_atomic(sh->dev[i].page);
1027 		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1028 						    addr, PAGE_SIZE);
1029 		kunmap_atomic(addr);
1030 	}
1031 	parity_pages = 1 + !!(sh->qd_idx >= 0);
1032 	data_pages = write_disks - parity_pages;
1033 
1034 	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
1035 	/*
1036 	 * The stripe must enter state machine again to finish the write, so
1037 	 * don't delay.
1038 	 */
1039 	clear_bit(STRIPE_DELAYED, &sh->state);
1040 	atomic_inc(&sh->count);
1041 
1042 	mutex_lock(&log->io_mutex);
1043 	/* meta + data */
1044 	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
1045 
1046 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1047 		if (!r5l_has_free_space(log, reserve)) {
1048 			r5l_add_no_space_stripe(log, sh);
1049 			wake_reclaim = true;
1050 		} else {
1051 			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1052 			if (ret) {
1053 				spin_lock_irq(&log->io_list_lock);
1054 				list_add_tail(&sh->log_list,
1055 					      &log->no_mem_stripes);
1056 				spin_unlock_irq(&log->io_list_lock);
1057 			}
1058 		}
1059 	} else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
1060 		/*
1061 		 * log space critical, do not process stripes that are
1062 		 * not in cache yet (sh->log_start == MaxSector).
1063 		 */
1064 		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
1065 		    sh->log_start == MaxSector) {
1066 			r5l_add_no_space_stripe(log, sh);
1067 			wake_reclaim = true;
1068 			reserve = 0;
1069 		} else if (!r5l_has_free_space(log, reserve)) {
1070 			if (sh->log_start == log->last_checkpoint)
1071 				BUG();
1072 			else
1073 				r5l_add_no_space_stripe(log, sh);
1074 		} else {
1075 			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1076 			if (ret) {
1077 				spin_lock_irq(&log->io_list_lock);
1078 				list_add_tail(&sh->log_list,
1079 					      &log->no_mem_stripes);
1080 				spin_unlock_irq(&log->io_list_lock);
1081 			}
1082 		}
1083 	}
1084 
1085 	mutex_unlock(&log->io_mutex);
1086 	if (wake_reclaim)
1087 		r5l_wake_reclaim(log, reserve);
1088 	return 0;
1089 }
1090 
1091 void r5l_write_stripe_run(struct r5l_log *log)
1092 {
1093 	if (!log)
1094 		return;
1095 	mutex_lock(&log->io_mutex);
1096 	r5l_submit_current_io(log);
1097 	mutex_unlock(&log->io_mutex);
1098 }
1099 
1100 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1101 {
1102 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1103 		/*
1104 		 * in write through (journal only)
1105 		 * we flush log disk cache first, then write stripe data to
1106 		 * raid disks. So if bio is finished, the log disk cache is
1107 		 * flushed already. The recovery guarantees we can recover
1108 		 * the bio from the log disk, so we don't need to flush again
1109 		 */
1110 		if (bio->bi_iter.bi_size == 0) {
1111 			bio_endio(bio);
1112 			return 0;
1113 		}
1114 		bio->bi_opf &= ~REQ_PREFLUSH;
1115 	} else {
1116 		/* write back (with cache) */
1117 		if (bio->bi_iter.bi_size == 0) {
1118 			mutex_lock(&log->io_mutex);
1119 			r5l_get_meta(log, 0);
1120 			bio_list_add(&log->current_io->flush_barriers, bio);
1121 			log->current_io->has_flush = 1;
1122 			log->current_io->has_null_flush = 1;
1123 			atomic_inc(&log->current_io->pending_stripe);
1124 			r5l_submit_current_io(log);
1125 			mutex_unlock(&log->io_mutex);
1126 			return 0;
1127 		}
1128 	}
1129 	return -EAGAIN;
1130 }
1131 
1132 /* This will run after log space is reclaimed */
1133 static void r5l_run_no_space_stripes(struct r5l_log *log)
1134 {
1135 	struct stripe_head *sh;
1136 
1137 	spin_lock(&log->no_space_stripes_lock);
1138 	while (!list_empty(&log->no_space_stripes)) {
1139 		sh = list_first_entry(&log->no_space_stripes,
1140 				      struct stripe_head, log_list);
1141 		list_del_init(&sh->log_list);
1142 		set_bit(STRIPE_HANDLE, &sh->state);
1143 		raid5_release_stripe(sh);
1144 	}
1145 	spin_unlock(&log->no_space_stripes_lock);
1146 }
1147 
1148 /*
1149  * calculate new last_checkpoint
1150  * for write through mode, returns log->next_checkpoint
1151  * for write back, returns log_start of first sh in stripe_in_journal_list
1152  */
1153 static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1154 {
1155 	struct stripe_head *sh;
1156 	struct r5l_log *log = READ_ONCE(conf->log);
1157 	sector_t new_cp;
1158 	unsigned long flags;
1159 
1160 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1161 		return log->next_checkpoint;
1162 
1163 	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1164 	if (list_empty(&log->stripe_in_journal_list)) {
1165 		/* all stripes flushed */
1166 		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1167 		return log->next_checkpoint;
1168 	}
1169 	sh = list_first_entry(&log->stripe_in_journal_list,
1170 			      struct stripe_head, r5c);
1171 	new_cp = sh->log_start;
1172 	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1173 	return new_cp;
1174 }
1175 
1176 static sector_t r5l_reclaimable_space(struct r5l_log *log)
1177 {
1178 	struct r5conf *conf = log->rdev->mddev->private;
1179 
1180 	return r5l_ring_distance(log, log->last_checkpoint,
1181 				 r5c_calculate_new_cp(conf));
1182 }
1183 
1184 static void r5l_run_no_mem_stripe(struct r5l_log *log)
1185 {
1186 	struct stripe_head *sh;
1187 
1188 	lockdep_assert_held(&log->io_list_lock);
1189 
1190 	if (!list_empty(&log->no_mem_stripes)) {
1191 		sh = list_first_entry(&log->no_mem_stripes,
1192 				      struct stripe_head, log_list);
1193 		list_del_init(&sh->log_list);
1194 		set_bit(STRIPE_HANDLE, &sh->state);
1195 		raid5_release_stripe(sh);
1196 	}
1197 }
1198 
1199 static bool r5l_complete_finished_ios(struct r5l_log *log)
1200 {
1201 	struct r5l_io_unit *io, *next;
1202 	bool found = false;
1203 
1204 	lockdep_assert_held(&log->io_list_lock);
1205 
1206 	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1207 		/* don't change list order */
1208 		if (io->state < IO_UNIT_STRIPE_END)
1209 			break;
1210 
1211 		log->next_checkpoint = io->log_start;
1212 
1213 		list_del(&io->log_sibling);
1214 		mempool_free(io, &log->io_pool);
1215 		r5l_run_no_mem_stripe(log);
1216 
1217 		found = true;
1218 	}
1219 
1220 	return found;
1221 }
1222 
1223 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1224 {
1225 	struct r5l_log *log = io->log;
1226 	struct r5conf *conf = log->rdev->mddev->private;
1227 	unsigned long flags;
1228 
1229 	spin_lock_irqsave(&log->io_list_lock, flags);
1230 	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
1231 
1232 	if (!r5l_complete_finished_ios(log)) {
1233 		spin_unlock_irqrestore(&log->io_list_lock, flags);
1234 		return;
1235 	}
1236 
1237 	if (r5l_reclaimable_space(log) > log->max_free_space ||
1238 	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1239 		r5l_wake_reclaim(log, 0);
1240 
1241 	spin_unlock_irqrestore(&log->io_list_lock, flags);
1242 	wake_up(&log->iounit_wait);
1243 }
1244 
1245 void r5l_stripe_write_finished(struct stripe_head *sh)
1246 {
1247 	struct r5l_io_unit *io;
1248 
1249 	io = sh->log_io;
1250 	sh->log_io = NULL;
1251 
1252 	if (io && atomic_dec_and_test(&io->pending_stripe))
1253 		__r5l_stripe_write_finished(io);
1254 }
1255 
1256 static void r5l_log_flush_endio(struct bio *bio)
1257 {
1258 	struct r5l_log *log = container_of(bio, struct r5l_log,
1259 		flush_bio);
1260 	unsigned long flags;
1261 	struct r5l_io_unit *io;
1262 
1263 	if (bio->bi_status)
1264 		md_error(log->rdev->mddev, log->rdev);
1265 	bio_uninit(bio);
1266 
1267 	spin_lock_irqsave(&log->io_list_lock, flags);
1268 	list_for_each_entry(io, &log->flushing_ios, log_sibling)
1269 		r5l_io_run_stripes(io);
1270 	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1271 	spin_unlock_irqrestore(&log->io_list_lock, flags);
1272 }
1273 
1274 /*
1275  * Start dispatching IO to the raid disks.
1276  * The log consists of io_units, each led by a meta block. One situation we
1277  * want to avoid: a broken meta block in the middle of the log keeps recovery
1278  * from finding the meta blocks after it. So if an operation needs the meta
1279  * block at the head to be persistent in the log, the meta blocks before it
1280  * must be persistent in the log too. A case is:
1281  *
1282  * stripe data/parity is in the log and we start writing the stripe to the
1283  * raid disks; the data/parity must be persistent in the log first.
1284  *
1285  * The solution is to strictly maintain io_unit list order: we only write an
1286  * io_unit's stripes to raid once it is the first one whose data/parity is in the log.
1287  */
1288 void r5l_flush_stripe_to_raid(struct r5l_log *log)
1289 {
1290 	bool do_flush;
1291 
1292 	if (!log || !log->need_cache_flush)
1293 		return;
1294 
1295 	spin_lock_irq(&log->io_list_lock);
1296 	/* flush bio is running */
1297 	if (!list_empty(&log->flushing_ios)) {
1298 		spin_unlock_irq(&log->io_list_lock);
1299 		return;
1300 	}
1301 	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1302 	do_flush = !list_empty(&log->flushing_ios);
1303 	spin_unlock_irq(&log->io_list_lock);
1304 
1305 	if (!do_flush)
1306 		return;
1307 	bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1308 		  REQ_OP_WRITE | REQ_PREFLUSH);
1309 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
1310 	submit_bio(&log->flush_bio);
1311 }
1312 
1313 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1314 static void r5l_write_super_and_discard_space(struct r5l_log *log,
1315 	sector_t end)
1316 {
1317 	struct block_device *bdev = log->rdev->bdev;
1318 	struct mddev *mddev;
1319 
1320 	r5l_write_super(log, end);
1321 
1322 	if (!bdev_max_discard_sectors(bdev))
1323 		return;
1324 
1325 	mddev = log->rdev->mddev;
1326 	/*
1327 	 * Discard could zero data, so before discard we must make sure
1328 	 * the superblock is updated to the new log tail. Updating the superblock
1329 	 * (either by calling md_update_sb() directly or depending on the md
1330 	 * thread) must hold the reconfig mutex. On the other hand, raid5_quiesce
1331 	 * is called with the reconfig_mutex held. The first step of raid5_quiesce()
1332 	 * is waiting for all IO to finish, hence waiting for the reclaim thread,
1333 	 * while the reclaim thread is calling this function and waiting for the
1334 	 * reconfig mutex. So there is a deadlock. We work around this with a trylock.
1335 	 * FIXME: we could miss discard if we can't take reconfig mutex
1336 	 */
1337 	set_mask_bits(&mddev->sb_flags, 0,
1338 		BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1339 	if (!mddev_trylock(mddev))
1340 		return;
1341 	md_update_sb(mddev, 1);
1342 	mddev_unlock(mddev);
1343 
1344 	/* discard IO error really doesn't matter, ignore it */
1345 	if (log->last_checkpoint < end) {
1346 		blkdev_issue_discard(bdev,
1347 				log->last_checkpoint + log->rdev->data_offset,
1348 				end - log->last_checkpoint, GFP_NOIO);
1349 	} else {
1350 		blkdev_issue_discard(bdev,
1351 				log->last_checkpoint + log->rdev->data_offset,
1352 				log->device_size - log->last_checkpoint,
1353 				GFP_NOIO);
1354 		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
1355 				GFP_NOIO);
1356 	}
1357 }
1358 
1359 /*
1360  * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1361  * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1362  *
1363  * must hold conf->device_lock
1364  */
1365 static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1366 {
1367 	BUG_ON(list_empty(&sh->lru));
1368 	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1369 	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1370 
1371 	/*
1372 	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
1373 	 * raid5_release_stripe() while holding conf->device_lock
1374 	 */
1375 	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1376 	lockdep_assert_held(&conf->device_lock);
1377 
1378 	list_del_init(&sh->lru);
1379 	atomic_inc(&sh->count);
1380 
1381 	set_bit(STRIPE_HANDLE, &sh->state);
1382 	atomic_inc(&conf->active_stripes);
1383 	r5c_make_stripe_write_out(sh);
1384 
1385 	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1386 		atomic_inc(&conf->r5c_flushing_partial_stripes);
1387 	else
1388 		atomic_inc(&conf->r5c_flushing_full_stripes);
1389 	raid5_release_stripe(sh);
1390 }
1391 
1392 /*
1393  * if num == 0, flush all full stripes
1394  * if num > 0, flush all full stripes. If less than num full stripes are
1395  *             flushed, flush some partial stripes until a total of num stripes
1396  *             are flushed or there are no more cached stripes.
1397  */
1398 void r5c_flush_cache(struct r5conf *conf, int num)
1399 {
1400 	int count;
1401 	struct stripe_head *sh, *next;
1402 
1403 	lockdep_assert_held(&conf->device_lock);
1404 	if (!READ_ONCE(conf->log))
1405 		return;
1406 
1407 	count = 0;
1408 	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1409 		r5c_flush_stripe(conf, sh);
1410 		count++;
1411 	}
1412 
1413 	if (count >= num)
1414 		return;
1415 	list_for_each_entry_safe(sh, next,
1416 				 &conf->r5c_partial_stripe_list, lru) {
1417 		r5c_flush_stripe(conf, sh);
1418 		if (++count >= num)
1419 			break;
1420 	}
1421 }
1422 
1423 static void r5c_do_reclaim(struct r5conf *conf)
1424 {
1425 	struct r5l_log *log = READ_ONCE(conf->log);
1426 	struct stripe_head *sh;
1427 	int count = 0;
1428 	unsigned long flags;
1429 	int total_cached;
1430 	int stripes_to_flush;
1431 	int flushing_partial, flushing_full;
1432 
1433 	if (!r5c_is_writeback(log))
1434 		return;
1435 
1436 	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1437 	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1438 	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1439 		atomic_read(&conf->r5c_cached_full_stripes) -
1440 		flushing_full - flushing_partial;
1441 
1442 	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1443 	    atomic_read(&conf->empty_inactive_list_nr) > 0)
1444 		/*
1445 		 * if stripe cache pressure is high, flush all full stripes and
1446 		 * some partial stripes
1447 		 */
1448 		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1449 	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1450 		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
1451 		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1452 		/*
1453 		 * if stripe cache pressure is moderate, or if there are many full
1454 		 * stripes, flush all full stripes
1455 		 */
1456 		stripes_to_flush = 0;
1457 	else
1458 		/* no need to flush */
1459 		stripes_to_flush = -1;
1460 
1461 	if (stripes_to_flush >= 0) {
1462 		spin_lock_irqsave(&conf->device_lock, flags);
1463 		r5c_flush_cache(conf, stripes_to_flush);
1464 		spin_unlock_irqrestore(&conf->device_lock, flags);
1465 	}
1466 
1467 	/* if log space is tight, flush stripes on stripe_in_journal_list */
1468 	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1469 		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1470 		spin_lock(&conf->device_lock);
1471 		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1472 			/*
1473 			 * stripes on stripe_in_journal_list could be in any
1474 			 * state of the stripe_cache state machine. In this
1475 			 * case, we only want to flush stripes on
1476 			 * r5c_cached_full/partial_stripes. The following
1477 			 * condition makes sure the stripe is on one of the
1478 			 * two lists.
1479 			 */
1480 			if (!list_empty(&sh->lru) &&
1481 			    !test_bit(STRIPE_HANDLE, &sh->state) &&
1482 			    atomic_read(&sh->count) == 0) {
1483 				r5c_flush_stripe(conf, sh);
1484 				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1485 					break;
1486 			}
1487 		}
1488 		spin_unlock(&conf->device_lock);
1489 		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1490 	}
1491 
1492 	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1493 		r5l_run_no_space_stripes(log);
1494 
1495 	md_wakeup_thread(conf->mddev->thread);
1496 }
1497 
1498 static void r5l_do_reclaim(struct r5l_log *log)
1499 {
1500 	struct r5conf *conf = log->rdev->mddev->private;
1501 	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1502 	sector_t reclaimable;
1503 	sector_t next_checkpoint;
1504 	bool write_super;
1505 
1506 	spin_lock_irq(&log->io_list_lock);
1507 	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1508 		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1509 	/*
1510 	 * move proper io_unit to reclaim list. We should not change the order.
1511 	 * Reclaimable and unreclaimable io_units can be mixed in the list; we
1512 	 * shouldn't reuse the space of an unreclaimable io_unit.
1513 	 */
1514 	while (1) {
1515 		reclaimable = r5l_reclaimable_space(log);
1516 		if (reclaimable >= reclaim_target ||
1517 		    (list_empty(&log->running_ios) &&
1518 		     list_empty(&log->io_end_ios) &&
1519 		     list_empty(&log->flushing_ios) &&
1520 		     list_empty(&log->finished_ios)))
1521 			break;
1522 
1523 		md_wakeup_thread(log->rdev->mddev->thread);
1524 		wait_event_lock_irq(log->iounit_wait,
1525 				    r5l_reclaimable_space(log) > reclaimable,
1526 				    log->io_list_lock);
1527 	}
1528 
1529 	next_checkpoint = r5c_calculate_new_cp(conf);
1530 	spin_unlock_irq(&log->io_list_lock);
1531 
1532 	if (reclaimable == 0 || !write_super)
1533 		return;
1534 
1535 	/*
1536 	 * write_super will flush cache of each raid disk. We must write super
1537 	 * here, because the log area might be reused soon and we don't want to
1538 	 * confuse recovery
1539 	 */
1540 	r5l_write_super_and_discard_space(log, next_checkpoint);
1541 
1542 	mutex_lock(&log->io_mutex);
1543 	log->last_checkpoint = next_checkpoint;
1544 	r5c_update_log_state(log);
1545 	mutex_unlock(&log->io_mutex);
1546 
1547 	r5l_run_no_space_stripes(log);
1548 }
1549 
1550 static void r5l_reclaim_thread(struct md_thread *thread)
1551 {
1552 	struct mddev *mddev = thread->mddev;
1553 	struct r5conf *conf = mddev->private;
1554 	struct r5l_log *log = READ_ONCE(conf->log);
1555 
1556 	if (!log)
1557 		return;
1558 	r5c_do_reclaim(conf);
1559 	r5l_do_reclaim(log);
1560 }
1561 
1562 void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1563 {
1564 	unsigned long target;
1565 	unsigned long new = (unsigned long)space; /* overflow in theory */
1566 
1567 	if (!log)
1568 		return;
1569 
1570 	target = READ_ONCE(log->reclaim_target);
1571 	do {
1572 		if (new < target)
1573 			return;
1574 	} while (!try_cmpxchg(&log->reclaim_target, &target, new));
1575 	md_wakeup_thread(log->reclaim_thread);
1576 }
1577 
1578 void r5l_quiesce(struct r5l_log *log, int quiesce)
1579 {
1580 	struct mddev *mddev = log->rdev->mddev;
1581 	struct md_thread *thread = rcu_dereference_protected(
1582 		log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1583 
1584 	if (quiesce) {
1585 		/* make sure r5l_write_super_and_discard_space exits */
1586 		wake_up(&mddev->sb_wait);
1587 		kthread_park(thread->tsk);
1588 		r5l_wake_reclaim(log, MaxSector);
1589 		r5l_do_reclaim(log);
1590 	} else
1591 		kthread_unpark(thread->tsk);
1592 }
1593 
1594 bool r5l_log_disk_error(struct r5conf *conf)
1595 {
1596 	struct r5l_log *log = READ_ONCE(conf->log);
1597 
1598 	/* don't allow write if journal disk is missing */
1599 	if (!log)
1600 		return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1601 	else
1602 		return test_bit(Faulty, &log->rdev->flags);
1603 }
1604 
1605 #define R5L_RECOVERY_PAGE_POOL_SIZE 256
1606 
1607 struct r5l_recovery_ctx {
1608 	struct page *meta_page;		/* current meta */
1609 	sector_t meta_total_blocks;	/* total size of current meta and data */
1610 	sector_t pos;			/* recovery position */
1611 	u64 seq;			/* recovery position seq */
1612 	int data_parity_stripes;	/* number of data_parity stripes */
1613 	int data_only_stripes;		/* number of data_only stripes */
1614 	struct list_head cached_list;
1615 
1616 	/*
1617 	 * read ahead page pool (ra_pool)
1618 	 * in recovery, log is read sequentially. It is not efficient to
1619 	 * read every page with sync_page_io(). The read ahead page pool
1620 	 * reads multiple pages with one IO, so further log read can
1621 	 * just copy data from the pool.
1622 	 */
1623 	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
1624 	struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1625 	sector_t pool_offset;	/* offset of first page in the pool */
1626 	int total_pages;	/* total allocated pages */
1627 	int valid_pages;	/* pages with valid data */
1628 };
1629 
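/*
 * A rough illustration of the read-ahead window, assuming the full pool of
 * R5L_RECOVERY_PAGE_POOL_SIZE (256) pages was allocated: one fetch covers
 * 256 * BLOCK_SECTORS = 2048 sectors starting at pool_offset, and a log
 * sector 'offset' maps to pool slot (offset - pool_offset) >> BLOCK_SECTOR_SHIFT.
 * r5l_recovery_read_page() refills the window whenever the requested offset
 * falls outside [pool_offset, pool_offset + valid_pages * BLOCK_SECTORS).
 */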
1630 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1631 					    struct r5l_recovery_ctx *ctx)
1632 {
1633 	struct page *page;
1634 
1635 	ctx->valid_pages = 0;
1636 	ctx->total_pages = 0;
1637 	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1638 		page = alloc_page(GFP_KERNEL);
1639 
1640 		if (!page)
1641 			break;
1642 		ctx->ra_pool[ctx->total_pages] = page;
1643 		ctx->total_pages += 1;
1644 	}
1645 
1646 	if (ctx->total_pages == 0)
1647 		return -ENOMEM;
1648 
1649 	ctx->pool_offset = 0;
1650 	return 0;
1651 }
1652 
1653 static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1654 					struct r5l_recovery_ctx *ctx)
1655 {
1656 	int i;
1657 
1658 	for (i = 0; i < ctx->total_pages; ++i)
1659 		put_page(ctx->ra_pool[i]);
1660 }
1661 
1662 /*
1663  * fetch ctx->valid_pages pages from offset
1664  * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1665  * However, if the offset is close to the end of the journal device,
1666  * ctx->valid_pages could be smaller than ctx->total_pages
1667  */
1668 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1669 				      struct r5l_recovery_ctx *ctx,
1670 				      sector_t offset)
1671 {
1672 	struct bio bio;
1673 	int ret;
1674 
1675 	bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1676 		 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
1677 	bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1678 
1679 	ctx->valid_pages = 0;
1680 	ctx->pool_offset = offset;
1681 
1682 	while (ctx->valid_pages < ctx->total_pages) {
1683 		__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1684 			       0);
1685 		ctx->valid_pages += 1;
1686 
1687 		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1688 
1689 		if (offset == 0)  /* reached end of the device */
1690 			break;
1691 	}
1692 
1693 	ret = submit_bio_wait(&bio);
1694 	bio_uninit(&bio);
1695 	return ret;
1696 }
1697 
1698 /*
1699  * try read a page from the read ahead page pool, if the page is not in the
1700  * pool, call r5l_recovery_fetch_ra_pool
1701  */
1702 static int r5l_recovery_read_page(struct r5l_log *log,
1703 				  struct r5l_recovery_ctx *ctx,
1704 				  struct page *page,
1705 				  sector_t offset)
1706 {
1707 	int ret;
1708 
1709 	if (offset < ctx->pool_offset ||
1710 	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1711 		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1712 		if (ret)
1713 			return ret;
1714 	}
1715 
1716 	BUG_ON(offset < ctx->pool_offset ||
1717 	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1718 
1719 	memcpy(page_address(page),
1720 	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1721 					 BLOCK_SECTOR_SHIFT]),
1722 	       PAGE_SIZE);
1723 	return 0;
1724 }
1725 
1726 static int r5l_recovery_read_meta_block(struct r5l_log *log,
1727 					struct r5l_recovery_ctx *ctx)
1728 {
1729 	struct page *page = ctx->meta_page;
1730 	struct r5l_meta_block *mb;
1731 	u32 crc, stored_crc;
1732 	int ret;
1733 
1734 	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1735 	if (ret != 0)
1736 		return ret;
1737 
1738 	mb = page_address(page);
1739 	stored_crc = le32_to_cpu(mb->checksum);
1740 	mb->checksum = 0;
1741 
1742 	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1743 	    le64_to_cpu(mb->seq) != ctx->seq ||
1744 	    mb->version != R5LOG_VERSION ||
1745 	    le64_to_cpu(mb->position) != ctx->pos)
1746 		return -EINVAL;
1747 
1748 	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1749 	if (stored_crc != crc)
1750 		return -EINVAL;
1751 
1752 	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1753 		return -EINVAL;
1754 
1755 	ctx->meta_total_blocks = BLOCK_SECTORS;
1756 
1757 	return 0;
1758 }
1759 
1760 static void
1761 r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1762 				     struct page *page,
1763 				     sector_t pos, u64 seq)
1764 {
1765 	struct r5l_meta_block *mb;
1766 
1767 	mb = page_address(page);
1768 	clear_page(mb);
1769 	mb->magic = cpu_to_le32(R5LOG_MAGIC);
1770 	mb->version = R5LOG_VERSION;
1771 	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1772 	mb->seq = cpu_to_le64(seq);
1773 	mb->position = cpu_to_le64(pos);
1774 }
1775 
1776 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1777 					  u64 seq)
1778 {
1779 	struct page *page;
1780 	struct r5l_meta_block *mb;
1781 
1782 	page = alloc_page(GFP_KERNEL);
1783 	if (!page)
1784 		return -ENOMEM;
1785 	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1786 	mb = page_address(page);
1787 	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1788 					     mb, PAGE_SIZE));
1789 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1790 			  REQ_SYNC | REQ_FUA, false)) {
1791 		__free_page(page);
1792 		return -EIO;
1793 	}
1794 	__free_page(page);
1795 	return 0;
1796 }
1797 
1798 /*
1799  * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
1800  * to mark valid (potentially not flushed) data in the journal.
1801  *
1802  * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1803  * so there should not be any mismatch here.
1804  */
1805 static void r5l_recovery_load_data(struct r5l_log *log,
1806 				   struct stripe_head *sh,
1807 				   struct r5l_recovery_ctx *ctx,
1808 				   struct r5l_payload_data_parity *payload,
1809 				   sector_t log_offset)
1810 {
1811 	struct mddev *mddev = log->rdev->mddev;
1812 	struct r5conf *conf = mddev->private;
1813 	int dd_idx;
1814 
1815 	raid5_compute_sector(conf,
1816 			     le64_to_cpu(payload->location), 0,
1817 			     &dd_idx, sh);
1818 	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1819 	sh->dev[dd_idx].log_checksum =
1820 		le32_to_cpu(payload->checksum[0]);
1821 	ctx->meta_total_blocks += BLOCK_SECTORS;
1822 
1823 	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1824 	set_bit(STRIPE_R5C_CACHING, &sh->state);
1825 }
1826 
1827 static void r5l_recovery_load_parity(struct r5l_log *log,
1828 				     struct stripe_head *sh,
1829 				     struct r5l_recovery_ctx *ctx,
1830 				     struct r5l_payload_data_parity *payload,
1831 				     sector_t log_offset)
1832 {
1833 	struct mddev *mddev = log->rdev->mddev;
1834 	struct r5conf *conf = mddev->private;
1835 
1836 	ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1837 	r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1838 	sh->dev[sh->pd_idx].log_checksum =
1839 		le32_to_cpu(payload->checksum[0]);
1840 	set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1841 
1842 	if (sh->qd_idx >= 0) {
1843 		r5l_recovery_read_page(
1844 			log, ctx, sh->dev[sh->qd_idx].page,
1845 			r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1846 		sh->dev[sh->qd_idx].log_checksum =
1847 			le32_to_cpu(payload->checksum[1]);
1848 		set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1849 	}
1850 	clear_bit(STRIPE_R5C_CACHING, &sh->state);
1851 }
1852 
1853 static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1854 {
1855 	int i;
1856 
1857 	sh->state = 0;
1858 	sh->log_start = MaxSector;
1859 	for (i = sh->disks; i--; )
1860 		sh->dev[i].flags = 0;
1861 }
1862 
1863 static void
1864 r5l_recovery_replay_one_stripe(struct r5conf *conf,
1865 			       struct stripe_head *sh,
1866 			       struct r5l_recovery_ctx *ctx)
1867 {
1868 	struct md_rdev *rdev, *rrdev;
1869 	int disk_index;
1870 	int data_count = 0;
1871 
1872 	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1873 		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1874 			continue;
1875 		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1876 			continue;
1877 		data_count++;
1878 	}
1879 
1880 	/*
1881 	 * stripes that only have parity must have been flushed
1882 	 * before the crash that we are now recovering from, so
1883 	 * there is nothing more to recover.
1884 	 */
1885 	if (data_count == 0)
1886 		goto out;
1887 
1888 	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1889 		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1890 			continue;
1891 
1892 		/* in case device is broken */
1893 		rdev = conf->disks[disk_index].rdev;
1894 		if (rdev) {
1895 			atomic_inc(&rdev->nr_pending);
1896 			sync_page_io(rdev, sh->sector, PAGE_SIZE,
1897 				     sh->dev[disk_index].page, REQ_OP_WRITE,
1898 				     false);
1899 			rdev_dec_pending(rdev, rdev->mddev);
1900 		}
1901 		rrdev = conf->disks[disk_index].replacement;
1902 		if (rrdev) {
1903 			atomic_inc(&rrdev->nr_pending);
1904 			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1905 				     sh->dev[disk_index].page, REQ_OP_WRITE,
1906 				     false);
1907 			rdev_dec_pending(rrdev, rrdev->mddev);
1908 		}
1909 	}
1910 	ctx->data_parity_stripes++;
1911 out:
1912 	r5l_recovery_reset_stripe(sh);
1913 }
1914 
1915 static struct stripe_head *
1916 r5c_recovery_alloc_stripe(
1917 		struct r5conf *conf,
1918 		sector_t stripe_sect,
1919 		int noblock)
1920 {
1921 	struct stripe_head *sh;
1922 
1923 	sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
1924 				     noblock ? R5_GAS_NOBLOCK : 0);
1925 	if (!sh)
1926 		return NULL;  /* no more stripe available */
1927 
1928 	r5l_recovery_reset_stripe(sh);
1929 
1930 	return sh;
1931 }
1932 
1933 static struct stripe_head *
1934 r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1935 {
1936 	struct stripe_head *sh;
1937 
1938 	list_for_each_entry(sh, list, lru)
1939 		if (sh->sector == sect)
1940 			return sh;
1941 	return NULL;
1942 }
1943 
1944 static void
1945 r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1946 			  struct r5l_recovery_ctx *ctx)
1947 {
1948 	struct stripe_head *sh, *next;
1949 
1950 	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1951 		r5l_recovery_reset_stripe(sh);
1952 		list_del_init(&sh->lru);
1953 		raid5_release_stripe(sh);
1954 	}
1955 }
1956 
1957 static void
1958 r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1959 			    struct r5l_recovery_ctx *ctx)
1960 {
1961 	struct stripe_head *sh, *next;
1962 
1963 	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1964 		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1965 			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1966 			list_del_init(&sh->lru);
1967 			raid5_release_stripe(sh);
1968 		}
1969 }
1970 
1971 /* if the checksum matches return 0; otherwise return -EINVAL */
1972 static int
1973 r5l_recovery_verify_data_checksum(struct r5l_log *log,
1974 				  struct r5l_recovery_ctx *ctx,
1975 				  struct page *page,
1976 				  sector_t log_offset, __le32 log_checksum)
1977 {
1978 	void *addr;
1979 	u32 checksum;
1980 
1981 	r5l_recovery_read_page(log, ctx, page, log_offset);
1982 	addr = kmap_atomic(page);
1983 	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1984 	kunmap_atomic(addr);
1985 	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1986 }
1987 
1988 /*
1989  * before loading data to stripe cache, we need to verify the checksum for all data;
1990  * if there is a mismatch for any data page, we drop all data in the meta block
1991  */
1992 static int
1993 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1994 					 struct r5l_recovery_ctx *ctx)
1995 {
1996 	struct mddev *mddev = log->rdev->mddev;
1997 	struct r5conf *conf = mddev->private;
1998 	struct r5l_meta_block *mb = page_address(ctx->meta_page);
1999 	sector_t mb_offset = sizeof(struct r5l_meta_block);
2000 	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2001 	struct page *page;
2002 	struct r5l_payload_data_parity *payload;
2003 	struct r5l_payload_flush *payload_flush;
2004 
2005 	page = alloc_page(GFP_KERNEL);
2006 	if (!page)
2007 		return -ENOMEM;
2008 
2009 	while (mb_offset < le32_to_cpu(mb->meta_size)) {
2010 		payload = (void *)mb + mb_offset;
2011 		payload_flush = (void *)mb + mb_offset;
2012 
2013 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2014 			if (r5l_recovery_verify_data_checksum(
2015 				    log, ctx, page, log_offset,
2016 				    payload->checksum[0]) < 0)
2017 				goto mismatch;
2018 		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2019 			if (r5l_recovery_verify_data_checksum(
2020 				    log, ctx, page, log_offset,
2021 				    payload->checksum[0]) < 0)
2022 				goto mismatch;
2023 			if (conf->max_degraded == 2 && /* q for RAID 6 */
2024 			    r5l_recovery_verify_data_checksum(
2025 				    log, ctx, page,
2026 				    r5l_ring_add(log, log_offset,
2027 						 BLOCK_SECTORS),
2028 				    payload->checksum[1]) < 0)
2029 				goto mismatch;
2030 		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2031 			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2032 		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2033 			goto mismatch;
2034 
2035 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2036 			mb_offset += sizeof(struct r5l_payload_flush) +
2037 				le32_to_cpu(payload_flush->size);
2038 		} else {
2039 			/* DATA or PARITY payload */
2040 			log_offset = r5l_ring_add(log, log_offset,
2041 						  le32_to_cpu(payload->size));
2042 			mb_offset += sizeof(struct r5l_payload_data_parity) +
2043 				sizeof(__le32) *
2044 				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2045 		}
2046 
2047 	}
2048 
2049 	put_page(page);
2050 	return 0;
2051 
2052 mismatch:
2053 	put_page(page);
2054 	return -EINVAL;
2055 }
2056 
2057 /*
2058  * Analyze all data/parity pages in one meta block
2059  * Returns:
2060  * 0 for success
2061  * -EINVAL for unknown payload type
2062  * -EAGAIN for checksum mismatch of data page
2063  * -ENOMEM for running out of memory (alloc_page failed or ran out of stripes)
2064  */
2065 static int
2066 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2067 				struct r5l_recovery_ctx *ctx,
2068 				struct list_head *cached_stripe_list)
2069 {
2070 	struct mddev *mddev = log->rdev->mddev;
2071 	struct r5conf *conf = mddev->private;
2072 	struct r5l_meta_block *mb;
2073 	struct r5l_payload_data_parity *payload;
2074 	struct r5l_payload_flush *payload_flush;
2075 	int mb_offset;
2076 	sector_t log_offset;
2077 	sector_t stripe_sect;
2078 	struct stripe_head *sh;
2079 	int ret;
2080 
2081 	/*
2082 	 * for mismatch in data blocks, we will drop all data in this mb, but
2083 	 * we will still read next mb for other data with FLUSH flag, as
2084 	 * io_unit could finish out of order.
2085 	 */
2086 	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2087 	if (ret == -EINVAL)
2088 		return -EAGAIN;
2089 	else if (ret)
2090 		return ret;   /* -ENOMEM due to alloc_page() failure */
2091 
2092 	mb = page_address(ctx->meta_page);
2093 	mb_offset = sizeof(struct r5l_meta_block);
2094 	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2095 
2096 	while (mb_offset < le32_to_cpu(mb->meta_size)) {
2097 		int dd;
2098 
2099 		payload = (void *)mb + mb_offset;
2100 		payload_flush = (void *)mb + mb_offset;
2101 
2102 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2103 			int i, count;
2104 
2105 			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
2106 			for (i = 0; i < count; ++i) {
2107 				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
2108 				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2109 								stripe_sect);
2110 				if (sh) {
2111 					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2112 					r5l_recovery_reset_stripe(sh);
2113 					list_del_init(&sh->lru);
2114 					raid5_release_stripe(sh);
2115 				}
2116 			}
2117 
2118 			mb_offset += sizeof(struct r5l_payload_flush) +
2119 				le32_to_cpu(payload_flush->size);
2120 			continue;
2121 		}
2122 
2123 		/* DATA or PARITY payload */
2124 		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2125 			raid5_compute_sector(
2126 				conf, le64_to_cpu(payload->location), 0, &dd,
2127 				NULL)
2128 			: le64_to_cpu(payload->location);
2129 
2130 		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2131 						stripe_sect);
2132 
2133 		if (!sh) {
2134 			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2135 			/*
2136 			 * cannot get a stripe from raid5_get_active_stripe;
2137 			 * try replaying some stripes
2138 			 */
2139 			if (!sh) {
2140 				r5c_recovery_replay_stripes(
2141 					cached_stripe_list, ctx);
2142 				sh = r5c_recovery_alloc_stripe(
2143 					conf, stripe_sect, 1);
2144 			}
2145 			if (!sh) {
2146 				int new_size = conf->min_nr_stripes * 2;
2147 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2148 					mdname(mddev),
2149 					new_size);
2150 				ret = raid5_set_cache_size(mddev, new_size);
2151 				if (conf->min_nr_stripes <= new_size / 2) {
2152 					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2153 						mdname(mddev),
2154 						ret,
2155 						new_size,
2156 						conf->min_nr_stripes,
2157 						conf->max_nr_stripes);
2158 					return -ENOMEM;
2159 				}
2160 				sh = r5c_recovery_alloc_stripe(
2161 					conf, stripe_sect, 0);
2162 			}
2163 			if (!sh) {
2164 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2165 					mdname(mddev));
2166 				return -ENOMEM;
2167 			}
2168 			list_add_tail(&sh->lru, cached_stripe_list);
2169 		}
2170 
2171 		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2172 			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2173 			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2174 				r5l_recovery_replay_one_stripe(conf, sh, ctx);
2175 				list_move_tail(&sh->lru, cached_stripe_list);
2176 			}
2177 			r5l_recovery_load_data(log, sh, ctx, payload,
2178 					       log_offset);
2179 		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2180 			r5l_recovery_load_parity(log, sh, ctx, payload,
2181 						 log_offset);
2182 		else
2183 			return -EINVAL;
2184 
2185 		log_offset = r5l_ring_add(log, log_offset,
2186 					  le32_to_cpu(payload->size));
2187 
2188 		mb_offset += sizeof(struct r5l_payload_data_parity) +
2189 			sizeof(__le32) *
2190 			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2191 	}
2192 
2193 	return 0;
2194 }
2195 
2196 /*
2197  * Load the stripe into cache. The stripe will be written out later by
2198  * the stripe cache state machine.
2199  */
2200 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2201 					 struct stripe_head *sh)
2202 {
2203 	struct r5dev *dev;
2204 	int i;
2205 
2206 	for (i = sh->disks; i--; ) {
2207 		dev = sh->dev + i;
2208 		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2209 			set_bit(R5_InJournal, &dev->flags);
2210 			set_bit(R5_UPTODATE, &dev->flags);
2211 		}
2212 	}
2213 }
2214 
2215 /*
2216  * Scan through the log for all to-be-flushed data
2217  *
2218  * For stripes with data and parity, namely Data-Parity stripe
2219  * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2220  *
2221  * For stripes with only data, namely Data-Only stripe
2222  * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
2223  *
2224  * For a stripe, if we see data after parity, we should discard all previous
2225  * data and parity for this stripe, as these data are already flushed to
2226  * the array.
2227  *
2228  * At the end of the scan, we return the new journal_tail, which points to
2229  * first data-only stripe on the journal device, or next invalid meta block.
2230  */
2231 static int r5c_recovery_flush_log(struct r5l_log *log,
2232 				  struct r5l_recovery_ctx *ctx)
2233 {
2234 	struct stripe_head *sh;
2235 	int ret = 0;
2236 
2237 	/* scan through the log */
2238 	while (1) {
2239 		if (r5l_recovery_read_meta_block(log, ctx))
2240 			break;
2241 
2242 		ret = r5c_recovery_analyze_meta_block(log, ctx,
2243 						      &ctx->cached_list);
2244 		/*
2245 		 * -EAGAIN means mismatch in data block, in this case, we still
2246 		 * try scan the next metablock
2247 		 */
2248 		if (ret && ret != -EAGAIN)
2249 			break;   /* ret == -EINVAL or -ENOMEM */
2250 		ctx->seq++;
2251 		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2252 	}
2253 
2254 	if (ret == -ENOMEM) {
2255 		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2256 		return ret;
2257 	}
2258 
2259 	/* replay data-parity stripes */
2260 	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2261 
2262 	/* load data-only stripes to stripe cache */
2263 	list_for_each_entry(sh, &ctx->cached_list, lru) {
2264 		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2265 		r5c_recovery_load_one_stripe(log, sh);
2266 		ctx->data_only_stripes++;
2267 	}
2268 
2269 	return 0;
2270 }
2271 
2272 /*
2273  * we did a recovery. Now ctx.pos points to an invalid meta block. The new
2274  * log will start here, but we can't let the superblock point to the last valid
2275  * meta block. The log might look like:
2276  * | meta 1| meta 2| meta 3|
2277  * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
2278  * superblock points to meta 1, we write a new valid meta 2n.  If a crash
2279  * happens again, the new recovery will start from meta 1. Since meta 2n is
2280  * valid now, recovery will think meta 3 is valid, which is wrong.
2281  * The solution is to create a new meta in meta 2 with its seq == meta
2282  * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
2283  * will not think meta 3 is a valid meta, because its seq doesn't match.
2284  */
2285 
2286 /*
2287  * Before recovery, the log looks like the following
2288  *
2289  *   ---------------------------------------------
2290  *   |           valid log        | invalid log  |
2291  *   ---------------------------------------------
2292  *   ^
2293  *   |- log->last_checkpoint
2294  *   |- log->last_cp_seq
2295  *
2296  * Now we scan through the log until we see invalid entry
2297  *
2298  *   ---------------------------------------------
2299  *   |           valid log        | invalid log  |
2300  *   ---------------------------------------------
2301  *   ^                            ^
2302  *   |- log->last_checkpoint      |- ctx->pos
2303  *   |- log->last_cp_seq          |- ctx->seq
2304  *
2305  * From this point, we need to increase the seq number by 10000 to avoid
2306  * confusing the next recovery.
2307  *
2308  *   ---------------------------------------------
2309  *   |           valid log        | invalid log  |
2310  *   ---------------------------------------------
2311  *   ^                              ^
2312  *   |- log->last_checkpoint        |- ctx->pos+1
2313  *   |- log->last_cp_seq            |- ctx->seq+10001
2314  *
2315  * However, it is not safe to start the state machine yet, because the data-only
2316  * stripes are not yet secured in RAID. To save these data-only stripes, we
2317  * rewrite them starting from seq+10000.
2318  *
2319  *   -----------------------------------------------------------------
2320  *   |           valid log        | data only stripes | invalid log  |
2321  *   -----------------------------------------------------------------
2322  *   ^                                                ^
2323  *   |- log->last_checkpoint                          |- ctx->pos+n
2324  *   |- log->last_cp_seq                              |- ctx->seq+10000+n
2325  *
2326  * If failure happens again during this process, the recovery can safely start
2327  * again from log->last_checkpoint.
2328  *
2329  * Once data only stripes are rewritten to journal, we move log_tail
2330  *
2331  *   -----------------------------------------------------------------
2332  *   |     old log        |    data only stripes    | invalid log  |
2333  *   -----------------------------------------------------------------
2334  *                        ^                         ^
2335  *                        |- log->last_checkpoint   |- ctx->pos+n
2336  *                        |- log->last_cp_seq       |- ctx->seq+10000+n
2337  *
2338  * Then we can safely start the state machine. If failure happens from this
2339  * point on, the recovery will start from new log->last_checkpoint.
2340  */
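/*
 * Worked example (hypothetical numbers): suppose the scan stopped at seq S
 * and position P with three data-only stripes cached. After the +10000 bump
 * in r5l_recovery_log(), the three stripes are rewritten with seq S+10000,
 * S+10001 and S+10002 starting at P, and the new log head
 * (log->log_start/log->seq) ends up just past the last rewrite. Only once
 * all of them are written is the superblock's journal_tail moved up to P;
 * a crash before that simply re-runs recovery from the old
 * log->last_checkpoint.
 */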
2341 static int
2342 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2343 				       struct r5l_recovery_ctx *ctx)
2344 {
2345 	struct stripe_head *sh;
2346 	struct mddev *mddev = log->rdev->mddev;
2347 	struct page *page;
2348 	sector_t next_checkpoint = MaxSector;
2349 
2350 	page = alloc_page(GFP_KERNEL);
2351 	if (!page) {
2352 		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2353 		       mdname(mddev));
2354 		return -ENOMEM;
2355 	}
2356 
2357 	WARN_ON(list_empty(&ctx->cached_list));
2358 
2359 	list_for_each_entry(sh, &ctx->cached_list, lru) {
2360 		struct r5l_meta_block *mb;
2361 		int i;
2362 		int offset;
2363 		sector_t write_pos;
2364 
2365 		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2366 		r5l_recovery_create_empty_meta_block(log, page,
2367 						     ctx->pos, ctx->seq);
2368 		mb = page_address(page);
2369 		offset = le32_to_cpu(mb->meta_size);
2370 		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2371 
2372 		for (i = sh->disks; i--; ) {
2373 			struct r5dev *dev = &sh->dev[i];
2374 			struct r5l_payload_data_parity *payload;
2375 			void *addr;
2376 
2377 			if (test_bit(R5_InJournal, &dev->flags)) {
2378 				payload = (void *)mb + offset;
2379 				payload->header.type = cpu_to_le16(
2380 					R5LOG_PAYLOAD_DATA);
2381 				payload->size = cpu_to_le32(BLOCK_SECTORS);
2382 				payload->location = cpu_to_le64(
2383 					raid5_compute_blocknr(sh, i, 0));
2384 				addr = kmap_atomic(dev->page);
2385 				payload->checksum[0] = cpu_to_le32(
2386 					crc32c_le(log->uuid_checksum, addr,
2387 						  PAGE_SIZE));
2388 				kunmap_atomic(addr);
2389 				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2390 					     dev->page, REQ_OP_WRITE, false);
2391 				write_pos = r5l_ring_add(log, write_pos,
2392 							 BLOCK_SECTORS);
2393 				offset += sizeof(__le32) +
2394 					sizeof(struct r5l_payload_data_parity);
2395 
2396 			}
2397 		}
2398 		mb->meta_size = cpu_to_le32(offset);
2399 		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2400 						     mb, PAGE_SIZE));
2401 		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2402 			     REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2403 		sh->log_start = ctx->pos;
2404 		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2405 		atomic_inc(&log->stripe_in_journal_count);
2406 		ctx->pos = write_pos;
2407 		ctx->seq += 1;
2408 		next_checkpoint = sh->log_start;
2409 	}
2410 	log->next_checkpoint = next_checkpoint;
2411 	__free_page(page);
2412 	return 0;
2413 }
2414 
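/*
 * Push the recovered data-only stripes out to the RAID disks by running
 * them through the normal stripe state machine: temporarily switch to
 * write-back mode (the cached-data handling only runs in that mode),
 * mark every cached stripe for write-out, and wait for the stripe cache
 * to drain before restoring write-through mode.
 */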
2415 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2416 						 struct r5l_recovery_ctx *ctx)
2417 {
2418 	struct mddev *mddev = log->rdev->mddev;
2419 	struct r5conf *conf = mddev->private;
2420 	struct stripe_head *sh, *next;
2421 	bool cleared_pending = false;
2422 
2423 	if (ctx->data_only_stripes == 0)
2424 		return;
2425 
2426 	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2427 		cleared_pending = true;
2428 		clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2429 	}
2430 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2431 
2432 	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2433 		r5c_make_stripe_write_out(sh);
2434 		set_bit(STRIPE_HANDLE, &sh->state);
2435 		list_del_init(&sh->lru);
2436 		raid5_release_stripe(sh);
2437 	}
2438 
2439 	/* reuse conf->wait_for_quiescent in recovery */
2440 	wait_event(conf->wait_for_quiescent,
2441 		   atomic_read(&conf->active_stripes) == 0);
2442 
2443 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2444 	if (cleared_pending)
2445 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2446 }
2447 
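/*
 * Top-level journal recovery: scan the log from last_checkpoint
 * (r5c_recovery_flush_log), bump the sequence number by 10000, then either
 * write one empty meta block (no data-only stripes) or rewrite the
 * data-only stripes to the journal, record the new log tail in the
 * superblock, and finally flush the data-only stripes to the RAID disks
 * through the stripe state machine.
 */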
2448 static int r5l_recovery_log(struct r5l_log *log)
2449 {
2450 	struct mddev *mddev = log->rdev->mddev;
2451 	struct r5l_recovery_ctx *ctx;
2452 	int ret;
2453 	sector_t pos;
2454 
2455 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2456 	if (!ctx)
2457 		return -ENOMEM;
2458 
2459 	ctx->pos = log->last_checkpoint;
2460 	ctx->seq = log->last_cp_seq;
2461 	INIT_LIST_HEAD(&ctx->cached_list);
2462 	ctx->meta_page = alloc_page(GFP_KERNEL);
2463 
2464 	if (!ctx->meta_page) {
2465 		ret =  -ENOMEM;
2466 		goto meta_page;
2467 	}
2468 
2469 	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2470 		ret = -ENOMEM;
2471 		goto ra_pool;
2472 	}
2473 
2474 	ret = r5c_recovery_flush_log(log, ctx);
2475 
2476 	if (ret)
2477 		goto error;
2478 
2479 	pos = ctx->pos;
2480 	ctx->seq += 10000;
2481 
2482 	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2483 		pr_info("md/raid:%s: starting from clean shutdown\n",
2484 			 mdname(mddev));
2485 	else
2486 		pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2487 			 mdname(mddev), ctx->data_only_stripes,
2488 			 ctx->data_parity_stripes);
2489 
2490 	if (ctx->data_only_stripes == 0) {
2491 		log->next_checkpoint = ctx->pos;
2492 		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2493 		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2494 	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2495 		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2496 		       mdname(mddev));
2497 		ret =  -EIO;
2498 		goto error;
2499 	}
2500 
2501 	log->log_start = ctx->pos;
2502 	log->seq = ctx->seq;
2503 	log->last_checkpoint = pos;
2504 	r5l_write_super(log, pos);
2505 
2506 	r5c_recovery_flush_data_only_stripes(log, ctx);
2507 	ret = 0;
2508 error:
2509 	r5l_recovery_free_ra_pool(log, ctx);
2510 ra_pool:
2511 	__free_page(ctx->meta_page);
2512 meta_page:
2513 	kfree(ctx);
2514 	return ret;
2515 }
2516 
2517 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2518 {
2519 	struct mddev *mddev = log->rdev->mddev;
2520 
2521 	log->rdev->journal_tail = cp;
2522 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2523 }
2524 
2525 static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2526 {
2527 	struct r5conf *conf;
2528 	int ret;
2529 
2530 	ret = mddev_lock(mddev);
2531 	if (ret)
2532 		return ret;
2533 
2534 	conf = mddev->private;
2535 	if (!conf || !conf->log)
2536 		goto out_unlock;
2537 
2538 	switch (conf->log->r5c_journal_mode) {
2539 	case R5C_JOURNAL_MODE_WRITE_THROUGH:
2540 		ret = snprintf(
2541 			page, PAGE_SIZE, "[%s] %s\n",
2542 			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2543 			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2544 		break;
2545 	case R5C_JOURNAL_MODE_WRITE_BACK:
2546 		ret = snprintf(
2547 			page, PAGE_SIZE, "%s [%s]\n",
2548 			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2549 			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2550 		break;
2551 	default:
2552 		ret = 0;
2553 	}
2554 
2555 out_unlock:
2556 	mddev_unlock(mddev);
2557 	return ret;
2558 }
2559 
2560 /*
2561  * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2562  *
2563  * @mode as defined in 'enum r5c_journal_mode'.
2564  *
2565  */
2566 int r5c_journal_mode_set(struct mddev *mddev, int mode)
2567 {
2568 	struct r5conf *conf;
2569 
2570 	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2571 	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
2572 		return -EINVAL;
2573 
2574 	conf = mddev->private;
2575 	if (!conf || !conf->log)
2576 		return -ENODEV;
2577 
2578 	if (raid5_calc_degraded(conf) > 0 &&
2579 	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
2580 		return -EINVAL;
2581 
2582 	conf->log->r5c_journal_mode = mode;
2583 
2584 	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2585 		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
2586 	return 0;
2587 }
2588 EXPORT_SYMBOL(r5c_journal_mode_set);
2589 
2590 static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2591 				      const char *page, size_t length)
2592 {
2593 	int mode = ARRAY_SIZE(r5c_journal_mode_str);
2594 	size_t len = length;
2595 	int ret;
2596 
2597 	if (len < 2)
2598 		return -EINVAL;
2599 
2600 	if (page[len - 1] == '\n')
2601 		len--;
2602 
2603 	while (mode--)
2604 		if (strlen(r5c_journal_mode_str[mode]) == len &&
2605 		    !strncmp(page, r5c_journal_mode_str[mode], len))
2606 			break;
2607 	ret = mddev_suspend_and_lock(mddev);
2608 	if (ret)
2609 		return ret;
2610 	ret = r5c_journal_mode_set(mddev, mode);
2611 	mddev_unlock_and_resume(mddev);
2612 	return ret ?: length;
2613 }
2614 
2615 struct md_sysfs_entry
2616 r5c_journal_mode = __ATTR(journal_mode, 0644,
2617 			  r5c_journal_mode_show, r5c_journal_mode_store);
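/*
 * journal_mode is the per-array md sysfs attribute backing the two helpers
 * above; on a typical setup it should appear as something like
 * /sys/block/md0/md/journal_mode. Reading prints both modes with the active
 * one in brackets, and writing "write-through" or "write-back" switches the
 * cache mode.
 */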
2618 
2619 /*
2620  * Try handle write operation in caching phase. This function should only
2621  * be called in write-back mode.
2622  *
2623  * If all outstanding writes can be handled in caching phase, returns 0
2624  * If writes requires write-out phase, call r5c_make_stripe_write_out()
2625  * and returns -EAGAIN
2626  */
2627 int r5c_try_caching_write(struct r5conf *conf,
2628 			  struct stripe_head *sh,
2629 			  struct stripe_head_state *s,
2630 			  int disks)
2631 {
2632 	struct r5l_log *log = READ_ONCE(conf->log);
2633 	int i;
2634 	struct r5dev *dev;
2635 	int to_cache = 0;
2636 	void __rcu **pslot;
2637 	sector_t tree_index;
2638 	int ret;
2639 	uintptr_t refcount;
2640 
2641 	BUG_ON(!r5c_is_writeback(log));
2642 
2643 	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2644 		/*
2645 		 * There are two different scenarios here:
2646 		 *  1. The stripe has some data cached, and it is sent to
2647 		 *     write-out phase for reclaim
2648 		 *  2. The stripe is clean, and this is the first write
2649 		 *
2650 		 * For 1, return -EAGAIN, so we continue with
2651 		 * handle_stripe_dirtying().
2652 		 *
2653 		 * For 2, set STRIPE_R5C_CACHING and continue with caching
2654 		 * write.
2655 		 */
2656 
2657 		/* case 1: anything injournal or anything in written */
2658 		if (s->injournal > 0 || s->written > 0)
2659 			return -EAGAIN;
2660 		/* case 2 */
2661 		set_bit(STRIPE_R5C_CACHING, &sh->state);
2662 	}
2663 
2664 	/*
2665 	 * When run in degraded mode, array is set to write-through mode.
2666 	 * This check helps drain pending write safely in the transition to
2667 	 * write-through mode.
2668 	 *
2669 	 * When a stripe is syncing, the write is also handled in write
2670 	 * through mode.
2671 	 */
2672 	if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2673 		r5c_make_stripe_write_out(sh);
2674 		return -EAGAIN;
2675 	}
2676 
2677 	for (i = disks; i--; ) {
2678 		dev = &sh->dev[i];
2679 		/* if non-overwrite, use writing-out phase */
2680 		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2681 		    !test_bit(R5_InJournal, &dev->flags)) {
2682 			r5c_make_stripe_write_out(sh);
2683 			return -EAGAIN;
2684 		}
2685 	}
2686 
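	/*
	 * big_stripe_tree keeps a per-"big stripe" reference count encoded
	 * directly in the radix tree slot value: the count lives in the bits
	 * above R5C_RADIX_COUNT_SHIFT, so bumping it is just replacing the
	 * slot with (count + 1) << R5C_RADIX_COUNT_SHIFT and no per-stripe
	 * allocation is needed.
	 */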
2687 	/* if the stripe is not counted in big_stripe_tree, add it now */
2688 	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2689 	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2690 		tree_index = r5c_tree_index(conf, sh->sector);
2691 		spin_lock(&log->tree_lock);
2692 		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2693 					       tree_index);
2694 		if (pslot) {
2695 			refcount = (uintptr_t)radix_tree_deref_slot_protected(
2696 				pslot, &log->tree_lock) >>
2697 				R5C_RADIX_COUNT_SHIFT;
2698 			radix_tree_replace_slot(
2699 				&log->big_stripe_tree, pslot,
2700 				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2701 		} else {
2702 			/*
2703 			 * this radix_tree_insert can fail safely, so no
2704 			 * need to call radix_tree_preload()
2705 			 */
2706 			ret = radix_tree_insert(
2707 				&log->big_stripe_tree, tree_index,
2708 				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
2709 			if (ret) {
2710 				spin_unlock(&log->tree_lock);
2711 				r5c_make_stripe_write_out(sh);
2712 				return -EAGAIN;
2713 			}
2714 		}
2715 		spin_unlock(&log->tree_lock);
2716 
2717 		/*
2718 		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
2719 		 * counted in the radix tree
2720 		 */
2721 		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2722 		atomic_inc(&conf->r5c_cached_partial_stripes);
2723 	}
2724 
2725 	for (i = disks; i--; ) {
2726 		dev = &sh->dev[i];
2727 		if (dev->towrite) {
2728 			set_bit(R5_Wantwrite, &dev->flags);
2729 			set_bit(R5_Wantdrain, &dev->flags);
2730 			set_bit(R5_LOCKED, &dev->flags);
2731 			to_cache++;
2732 		}
2733 	}
2734 
2735 	if (to_cache) {
2736 		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2737 		/*
2738 		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2739 		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2740 		 * r5c_handle_data_cached()
2741 		 */
2742 		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
2743 	}
2744 
2745 	return 0;
2746 }
2747 
2748 /*
2749  * free extra pages (orig_page) we allocated for prexor
2750  */
2751 void r5c_release_extra_page(struct stripe_head *sh)
2752 {
2753 	struct r5conf *conf = sh->raid_conf;
2754 	int i;
2755 	bool using_disk_info_extra_page;
2756 
2757 	using_disk_info_extra_page =
2758 		sh->dev[0].orig_page == conf->disks[0].extra_page;
2759 
2760 	for (i = sh->disks; i--; )
2761 		if (sh->dev[i].page != sh->dev[i].orig_page) {
2762 			struct page *p = sh->dev[i].orig_page;
2763 
2764 			sh->dev[i].orig_page = sh->dev[i].page;
2765 			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2766 
2767 			if (!using_disk_info_extra_page)
2768 				put_page(p);
2769 		}
2770 
2771 	if (using_disk_info_extra_page) {
2772 		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2773 		md_wakeup_thread(conf->mddev->thread);
2774 	}
2775 }
2776 
2777 void r5c_use_extra_page(struct stripe_head *sh)
2778 {
2779 	struct r5conf *conf = sh->raid_conf;
2780 	int i;
2781 	struct r5dev *dev;
2782 
2783 	for (i = sh->disks; i--; ) {
2784 		dev = &sh->dev[i];
2785 		if (dev->orig_page != dev->page)
2786 			put_page(dev->orig_page);
2787 		dev->orig_page = conf->disks[i].extra_page;
2788 	}
2789 }
2790 
2791 /*
2792  * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2793  * stripe is committed to RAID disks.
2794  */
2795 void r5c_finish_stripe_write_out(struct r5conf *conf,
2796 				 struct stripe_head *sh,
2797 				 struct stripe_head_state *s)
2798 {
2799 	struct r5l_log *log = READ_ONCE(conf->log);
2800 	int i;
2801 	sector_t tree_index;
2802 	void __rcu **pslot;
2803 	uintptr_t refcount;
2804 
2805 	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2806 		return;
2807 
2808 	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2809 	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2810 
2811 	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2812 		return;
2813 
2814 	for (i = sh->disks; i--; ) {
2815 		clear_bit(R5_InJournal, &sh->dev[i].flags);
2816 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2817 			wake_up_bit(&sh->dev[i].flags, R5_Overlap);
2818 	}
2819 
2820 	/*
2821 	 * analyse_stripe() runs before r5c_finish_stripe_write_out();
2822 	 * we updated R5_InJournal, so we also update s->injournal.
2823 	 */
2824 	s->injournal = 0;
2825 
2826 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2827 		if (atomic_dec_and_test(&conf->pending_full_writes))
2828 			md_wakeup_thread(conf->mddev->thread);
2829 
2830 	spin_lock_irq(&log->stripe_in_journal_lock);
2831 	list_del_init(&sh->r5c);
2832 	spin_unlock_irq(&log->stripe_in_journal_lock);
2833 	sh->log_start = MaxSector;
2834 
2835 	atomic_dec(&log->stripe_in_journal_count);
2836 	r5c_update_log_state(log);
2837 
2838 	/* stop counting this stripe in big_stripe_tree */
2839 	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2840 	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2841 		tree_index = r5c_tree_index(conf, sh->sector);
2842 		spin_lock(&log->tree_lock);
2843 		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2844 					       tree_index);
2845 		BUG_ON(pslot == NULL);
2846 		refcount = (uintptr_t)radix_tree_deref_slot_protected(
2847 			pslot, &log->tree_lock) >>
2848 			R5C_RADIX_COUNT_SHIFT;
2849 		if (refcount == 1)
2850 			radix_tree_delete(&log->big_stripe_tree, tree_index);
2851 		else
2852 			radix_tree_replace_slot(
2853 				&log->big_stripe_tree, pslot,
2854 				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2855 		spin_unlock(&log->tree_lock);
2856 	}
2857 
2858 	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2859 		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2860 		atomic_dec(&conf->r5c_flushing_partial_stripes);
2861 		atomic_dec(&conf->r5c_cached_partial_stripes);
2862 	}
2863 
2864 	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2865 		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2866 		atomic_dec(&conf->r5c_flushing_full_stripes);
2867 		atomic_dec(&conf->r5c_cached_full_stripes);
2868 	}
2869 
2870 	r5l_append_flush_payload(log, sh->sector);
2871 	/* stripe is flushed to raid disks, we can do resync now */
2872 	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2873 		set_bit(STRIPE_HANDLE, &sh->state);
2874 }
2875 
2876 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2877 {
2878 	struct r5conf *conf = sh->raid_conf;
2879 	int pages = 0;
2880 	int reserve;
2881 	int i;
2882 	int ret = 0;
2883 
2884 	BUG_ON(!log);
2885 
2886 	for (i = 0; i < sh->disks; i++) {
2887 		void *addr;
2888 
2889 		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2890 			continue;
2891 		addr = kmap_atomic(sh->dev[i].page);
2892 		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2893 						    addr, PAGE_SIZE);
2894 		kunmap_atomic(addr);
2895 		pages++;
2896 	}
2897 	WARN_ON(pages == 0);
2898 
2899 	/*
2900 	 * The stripe must enter state machine again to call endio, so
2901 	 * don't delay.
2902 	 */
2903 	clear_bit(STRIPE_DELAYED, &sh->state);
2904 	atomic_inc(&sh->count);
2905 
2906 	mutex_lock(&log->io_mutex);
2907 	/* meta + data */
2908 	reserve = (1 + pages) << (PAGE_SHIFT - 9);
2909 
2910 	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2911 	    sh->log_start == MaxSector)
2912 		r5l_add_no_space_stripe(log, sh);
2913 	else if (!r5l_has_free_space(log, reserve)) {
2914 		if (sh->log_start == log->last_checkpoint)
2915 			BUG();
2916 		else
2917 			r5l_add_no_space_stripe(log, sh);
2918 	} else {
2919 		ret = r5l_log_stripe(log, sh, pages, 0);
2920 		if (ret) {
2921 			spin_lock_irq(&log->io_list_lock);
2922 			list_add_tail(&sh->log_list, &log->no_mem_stripes);
2923 			spin_unlock_irq(&log->io_list_lock);
2924 		}
2925 	}
2926 
2927 	mutex_unlock(&log->io_mutex);
2928 	return 0;
2929 }
2930 
2931 /* check whether this big stripe is in write back cache. */
2932 bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2933 {
2934 	struct r5l_log *log = READ_ONCE(conf->log);
2935 	sector_t tree_index;
2936 	void *slot;
2937 
2938 	if (!log)
2939 		return false;
2940 
2941 	tree_index = r5c_tree_index(conf, sect);
2942 	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2943 	return slot != NULL;
2944 }
2945 
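/*
 * Read the meta block that the superblock's journal_tail points at and
 * decide whether this is a usable log. Anything that fails validation
 * (bad magic/version, checksum mismatch, or a position field that does
 * not match where we read it from) makes us start a fresh log with a
 * random sequence number instead of attempting recovery.
 */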
2946 static int r5l_load_log(struct r5l_log *log)
2947 {
2948 	struct md_rdev *rdev = log->rdev;
2949 	struct page *page;
2950 	struct r5l_meta_block *mb;
2951 	sector_t cp = log->rdev->journal_tail;
2952 	u32 stored_crc, expected_crc;
2953 	bool create_super = false;
2954 	int ret = 0;
2955 
2956 	/* Make sure it's valid */
2957 	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2958 		cp = 0;
2959 	page = alloc_page(GFP_KERNEL);
2960 	if (!page)
2961 		return -ENOMEM;
2962 
2963 	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2964 		ret = -EIO;
2965 		goto ioerr;
2966 	}
2967 	mb = page_address(page);
2968 
2969 	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2970 	    mb->version != R5LOG_VERSION) {
2971 		create_super = true;
2972 		goto create;
2973 	}
2974 	stored_crc = le32_to_cpu(mb->checksum);
2975 	mb->checksum = 0;
2976 	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2977 	if (stored_crc != expected_crc) {
2978 		create_super = true;
2979 		goto create;
2980 	}
2981 	if (le64_to_cpu(mb->position) != cp) {
2982 		create_super = true;
2983 		goto create;
2984 	}
2985 create:
2986 	if (create_super) {
2987 		log->last_cp_seq = get_random_u32();
2988 		cp = 0;
2989 		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2990 		/*
2991 		 * Make sure super points to correct address. Log might have
2992 		 * data very soon. If super doesn't have the correct log tail address,
2993 		 * recovery can't find the log
2994 		 */
2995 		r5l_write_super(log, cp);
2996 	} else
2997 		log->last_cp_seq = le64_to_cpu(mb->seq);
2998 
2999 	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3000 	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3001 	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3002 		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3003 	log->last_checkpoint = cp;
3004 
3005 	__free_page(page);
3006 
3007 	if (create_super) {
3008 		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3009 		log->seq = log->last_cp_seq + 1;
3010 		log->next_checkpoint = cp;
3011 	} else
3012 		ret = r5l_recovery_log(log);
3013 
3014 	r5c_update_log_state(log);
3015 	return ret;
3016 ioerr:
3017 	__free_page(page);
3018 	return ret;
3019 }
3020 
3021 int r5l_start(struct r5l_log *log)
3022 {
3023 	int ret;
3024 
3025 	if (!log)
3026 		return 0;
3027 
3028 	ret = r5l_load_log(log);
3029 	if (ret) {
3030 		struct mddev *mddev = log->rdev->mddev;
3031 		struct r5conf *conf = mddev->private;
3032 
3033 		r5l_exit_log(conf);
3034 	}
3035 	return ret;
3036 }
3037 
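/*
 * Called when a member device (or the journal device itself) fails.
 * Write-back caching is only safe while the array is fully redundant,
 * so once the array goes degraded, or the journal rdev is the one that
 * failed, schedule the switch back to write-through.
 */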
3038 void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
3039 {
3040 	struct r5conf *conf = mddev->private;
3041 	struct r5l_log *log = READ_ONCE(conf->log);
3042 
3043 	if (!log)
3044 		return;
3045 
3046 	if ((raid5_calc_degraded(conf) > 0 ||
3047 	     test_bit(Journal, &rdev->flags)) &&
3048 	    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3049 		schedule_work(&log->disable_writeback_work);
3050 }
3051 
3052 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3053 {
3054 	struct r5l_log *log;
3055 	struct md_thread *thread;
3056 	int ret;
3057 
3058 	pr_debug("md/raid:%s: using device %pg as journal\n",
3059 		 mdname(conf->mddev), rdev->bdev);
3060 
3061 	if (PAGE_SIZE != 4096)
3062 		return -EINVAL;
3063 
3064 	/*
3065 	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3066 	 * raid_disks r5l_payload_data_parity.
3067 	 *
3068 	 * Write journal and cache do not work for very big arrays
3069 	 * (raid_disks > 203)
3070 	 */
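	/*
	 * Rough arithmetic behind the 203 above, assuming roughly 32 bytes
	 * for struct r5l_meta_block and about 20 bytes per disk for a
	 * payload descriptor plus its __le32 checksum:
	 * (4096 - 32) / 20 ~= 203.
	 */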
3071 	if (sizeof(struct r5l_meta_block) +
3072 	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3073 	     conf->raid_disks) > PAGE_SIZE) {
3074 		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3075 		       mdname(conf->mddev), conf->raid_disks);
3076 		return -EINVAL;
3077 	}
3078 
3079 	log = kzalloc(sizeof(*log), GFP_KERNEL);
3080 	if (!log)
3081 		return -ENOMEM;
3082 	log->rdev = rdev;
3083 	log->need_cache_flush = bdev_write_cache(rdev->bdev);
3084 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3085 				       sizeof(rdev->mddev->uuid));
3086 
3087 	mutex_init(&log->io_mutex);
3088 
3089 	spin_lock_init(&log->io_list_lock);
3090 	INIT_LIST_HEAD(&log->running_ios);
3091 	INIT_LIST_HEAD(&log->io_end_ios);
3092 	INIT_LIST_HEAD(&log->flushing_ios);
3093 	INIT_LIST_HEAD(&log->finished_ios);
3094 
3095 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3096 	if (!log->io_kc)
3097 		goto io_kc;
3098 
3099 	ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3100 	if (ret)
3101 		goto io_pool;
3102 
3103 	ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3104 	if (ret)
3105 		goto io_bs;
3106 
3107 	ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3108 	if (ret)
3109 		goto out_mempool;
3110 
3111 	spin_lock_init(&log->tree_lock);
3112 	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3113 
3114 	thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3115 				    "reclaim");
3116 	if (!thread)
3117 		goto reclaim_thread;
3118 
3119 	thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3120 	rcu_assign_pointer(log->reclaim_thread, thread);
3121 
3122 	init_waitqueue_head(&log->iounit_wait);
3123 
3124 	INIT_LIST_HEAD(&log->no_mem_stripes);
3125 
3126 	INIT_LIST_HEAD(&log->no_space_stripes);
3127 	spin_lock_init(&log->no_space_stripes_lock);
3128 
3129 	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3130 	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3131 
3132 	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3133 	INIT_LIST_HEAD(&log->stripe_in_journal_list);
3134 	spin_lock_init(&log->stripe_in_journal_lock);
3135 	atomic_set(&log->stripe_in_journal_count, 0);
3136 
3137 	WRITE_ONCE(conf->log, log);
3138 
3139 	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3140 	return 0;
3141 
3142 reclaim_thread:
3143 	mempool_exit(&log->meta_pool);
3144 out_mempool:
3145 	bioset_exit(&log->bs);
3146 io_bs:
3147 	mempool_exit(&log->io_pool);
3148 io_pool:
3149 	kmem_cache_destroy(log->io_kc);
3150 io_kc:
3151 	kfree(log);
3152 	return -EINVAL;
3153 }
3154 
3155 void r5l_exit_log(struct r5conf *conf)
3156 {
3157 	struct r5l_log *log = conf->log;
3158 
3159 	md_unregister_thread(conf->mddev, &log->reclaim_thread);
3160 
3161 	/*
3162 	 * 'reconfig_mutex' is held by caller, set 'conf->log' to NULL to
3163 	 * ensure disable_writeback_work wakes up and exits.
3164 	 */
3165 	WRITE_ONCE(conf->log, NULL);
3166 	wake_up(&conf->mddev->sb_wait);
3167 	flush_work(&log->disable_writeback_work);
3168 
3169 	mempool_exit(&log->meta_pool);
3170 	bioset_exit(&log->bs);
3171 	mempool_exit(&log->io_pool);
3172 	kmem_cache_destroy(log->io_kc);
3173 	kfree(log);
3174 }
3175