xref: /linux/drivers/md/raid5-ppl.c (revision a5359ddd052860bacf957e65fe819c63e974b3a6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Partial Parity Log for closing the RAID5 write hole
4  * Copyright (c) 2017, Intel Corporation.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/crc32c.h>
11 #include <linux/async_tx.h>
12 #include <linux/raid/md_p.h>
13 #include "md.h"
14 #include "raid5.h"
15 #include "raid5-log.h"
16 
17 /*
18  * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
19  * partial parity data. The header contains an array of entries
20  * (struct ppl_header_entry) which describe the logged write requests.
21  * Partial parity for the entries comes after the header, written in the same
22  * sequence as the entries:
23  *
24  * Header
25  *   entry0
26  *   ...
27  *   entryN
28  * PP data
29  *   PP for entry0
30  *   ...
31  *   PP for entryN
32  *
33  * An entry describes one or more consecutive stripe_heads, up to a full
34  * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
35  * number of stripe_heads in the entry and n is the number of modified data
36  * disks. Every stripe_head in the entry must write to the same data disks.
37  * An example of a valid case described by a single entry (writes to the first
38  * stripe of a 4 disk array, 16k chunk size):
39  *
40  * sh->sector   dd0   dd1   dd2    ppl
41  *            +-----+-----+-----+
42  * 0          | --- | --- | --- | +----+
43  * 8          | -W- | -W- | --- | | pp |   data_sector = 8
44  * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
45  * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
46  *            +-----+-----+-----+ +----+
47  *
48  * data_sector is the first raid sector of the modified data, data_size is the
49  * total size of modified data and pp_size is the size of partial parity for
50  * this entry. Entries for full stripe writes contain no partial parity
51  * (pp_size = 0), they only mark the stripes for which parity should be
52  * recalculated after an unclean shutdown. Every entry holds a checksum of its
53  * partial parity; the header also has a checksum of the header itself.
54  *
55  * A write request is always logged to the PPL instance stored on the parity
56  * disk of the corresponding stripe. For each member disk there is one ppl_log
57  * used to handle logging for this disk, independently of the others. They are
58  * grouped in child_logs array in struct ppl_conf, which is assigned to
59  * r5conf->log_private.
60  *
61  * ppl_io_unit represents a full PPL write; its header_page contains the ppl_header.
62  * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
63  * can be appended to the last entry if it meets the conditions for a valid
64  * entry described above, otherwise a new entry is added. Checksums of entries
65  * are calculated incrementally as stripes containing partial parity are being
66  * added. ppl_submit_iounit() calculates the checksum of the header and submits
67  * a bio containing the header page and partial parity pages (sh->ppl_page) for
68  * all stripes of the io_unit. When the PPL write completes, the stripes
69  * associated with the io_unit are released and raid5d starts writing their data
70  * and parity. When all stripes are written, the io_unit is freed and the next
71  * can be submitted.
72  *
73  * An io_unit is used to gather stripes until it is submitted or becomes full
74  * (if the maximum number of entries or size of PPL is reached). Another io_unit
75  * can't be submitted until the previous has completed (PPL and stripe
76  * data+parity is written). The log->io_list tracks all io_units of a log
77  * (for a single member disk). New io_units are added to the end of the list
78  * and the first io_unit is submitted, if it is not submitted already.
79  * The current io_unit accepting new stripes is always at the end of the list.
80  *
81  * If write-back cache is enabled for any of the disks in the array, its data
82  * must be flushed before the next io_unit is submitted.
83  */
84 
85 #define PPL_SPACE_SIZE (128 * 1024)
86 
87 struct ppl_conf {
88 	struct mddev *mddev;
89 
90 	/* array of child logs, one for each raid disk */
91 	struct ppl_log *child_logs;
92 	int count;
93 
94 	int block_size;		/* the logical block size used for data_sector
95 				 * in ppl_header_entry */
96 	u32 signature;		/* raid array identifier */
97 	atomic64_t seq;		/* current log write sequence number */
98 
99 	struct kmem_cache *io_kc;
100 	mempool_t io_pool;
101 	struct bio_set bs;
102 	struct bio_set flush_bs;
103 
104 	/* used only for recovery */
105 	int recovered_entries;
106 	int mismatch_count;
107 
108 	/* stripes to retry if failed to allocate io_unit */
109 	struct list_head no_mem_stripes;
110 	spinlock_t no_mem_stripes_lock;
111 
112 	unsigned short write_hint;
113 };
114 
115 struct ppl_log {
116 	struct ppl_conf *ppl_conf;	/* shared between all log instances */
117 
118 	struct md_rdev *rdev;		/* array member disk associated with
119 					 * this log instance */
120 	struct mutex io_mutex;
121 	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
122 					 * always at the end of io_list */
123 	spinlock_t io_list_lock;
124 	struct list_head io_list;	/* all io_units of this log */
125 
126 	sector_t next_io_sector;
127 	unsigned int entry_space;
128 	bool use_multippl;
129 	bool wb_cache_on;
130 	unsigned long disk_flush_bitmap;
131 };
132 
133 #define PPL_IO_INLINE_BVECS 32
134 
135 struct ppl_io_unit {
136 	struct ppl_log *log;
137 
138 	struct page *header_page;	/* for ppl_header */
139 
140 	unsigned int entries_count;	/* number of entries in ppl_header */
141 	unsigned int pp_size;		/* current total size of partial parity */
142 
143 	u64 seq;			/* sequence number of this log write */
144 	struct list_head log_sibling;	/* log->io_list */
145 
146 	struct list_head stripe_list;	/* stripes added to the io_unit */
147 	atomic_t pending_stripes;	/* how many stripes not written to raid */
148 	atomic_t pending_flushes;	/* how many disk flushes are in progress */
149 
150 	bool submitted;			/* true if write to log started */
151 
152 	/* inline bio and its biovec for submitting the iounit */
153 	struct bio bio;
154 	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
155 };
156 
157 struct dma_async_tx_descriptor *
158 ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
159 		       struct dma_async_tx_descriptor *tx)
160 {
161 	int disks = sh->disks;
162 	struct page **srcs = percpu->scribble;
163 	int count = 0, pd_idx = sh->pd_idx, i;
164 	struct async_submit_ctl submit;
165 
166 	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
167 
168 	/*
169 	 * Partial parity is the XOR of stripe data chunks that are not changed
170 	 * during the write request. Depending on available data
171 	 * (read-modify-write vs. reconstruct-write case) we calculate it
172 	 * differently.
173 	 */
174 	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
175 		/*
176 		 * rmw: xor old data and parity from updated disks
177 		 * This is calculated earlier by ops_run_prexor5() so just copy
178 		 * the parity dev page.
179 		 */
180 		srcs[count++] = sh->dev[pd_idx].page;
181 	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
182 		/* rcw: xor data from all not updated disks */
183 		for (i = disks; i--;) {
184 			struct r5dev *dev = &sh->dev[i];
185 			if (test_bit(R5_UPTODATE, &dev->flags))
186 				srcs[count++] = dev->page;
187 		}
188 	} else {
189 		return tx;
190 	}
191 
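	/*
	 * The srcs array lives at the start of the per-cpu scribble buffer;
	 * the space following the page pointers (srcs + sh->disks + 2) is
	 * used as the addr_conv area required by the async_tx API.
	 */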
192 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
193 			  NULL, sh, (void *) (srcs + sh->disks + 2));
194 
195 	if (count == 1)
196 		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
197 				  &submit);
198 	else
199 		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
200 			       &submit);
201 
202 	return tx;
203 }
204 
205 static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
206 {
207 	struct kmem_cache *kc = pool_data;
208 	struct ppl_io_unit *io;
209 
210 	io = kmem_cache_alloc(kc, gfp_mask);
211 	if (!io)
212 		return NULL;
213 
214 	io->header_page = alloc_page(gfp_mask);
215 	if (!io->header_page) {
216 		kmem_cache_free(kc, io);
217 		return NULL;
218 	}
219 
220 	return io;
221 }
222 
223 static void ppl_io_pool_free(void *element, void *pool_data)
224 {
225 	struct kmem_cache *kc = pool_data;
226 	struct ppl_io_unit *io = element;
227 
228 	__free_page(io->header_page);
229 	kmem_cache_free(kc, io);
230 }
231 
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
233 					  struct stripe_head *sh)
234 {
235 	struct ppl_conf *ppl_conf = log->ppl_conf;
236 	struct ppl_io_unit *io;
237 	struct ppl_header *pplhdr;
238 	struct page *header_page;
239 
240 	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
241 	if (!io)
242 		return NULL;
243 
244 	header_page = io->header_page;
245 	memset(io, 0, sizeof(*io));
246 	io->header_page = header_page;
247 
248 	io->log = log;
249 	INIT_LIST_HEAD(&io->log_sibling);
250 	INIT_LIST_HEAD(&io->stripe_list);
251 	atomic_set(&io->pending_stripes, 0);
252 	atomic_set(&io->pending_flushes, 0);
253 	bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0);
254 
255 	pplhdr = page_address(io->header_page);
256 	clear_page(pplhdr);
257 	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
258 	pplhdr->signature = cpu_to_le32(ppl_conf->signature);
259 
260 	io->seq = atomic64_add_return(1, &ppl_conf->seq);
261 	pplhdr->generation = cpu_to_le64(io->seq);
262 
263 	return io;
264 }
265 
266 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
267 {
268 	struct ppl_io_unit *io = log->current_io;
269 	struct ppl_header_entry *e = NULL;
270 	struct ppl_header *pplhdr;
271 	int i;
272 	sector_t data_sector = 0;
273 	int data_disks = 0;
274 	struct r5conf *conf = sh->raid_conf;
275 
276 	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
277 
278 	/* check if current io_unit is full */
279 	if (io && (io->pp_size == log->entry_space ||
280 		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
281 		pr_debug("%s: add io_unit blocked by seq: %llu\n",
282 			 __func__, io->seq);
283 		io = NULL;
284 	}
285 
286 	/* add a new unit if there is none or the current is full */
287 	if (!io) {
288 		io = ppl_new_iounit(log, sh);
289 		if (!io)
290 			return -ENOMEM;
291 		spin_lock_irq(&log->io_list_lock);
292 		list_add_tail(&io->log_sibling, &log->io_list);
293 		spin_unlock_irq(&log->io_list_lock);
294 
295 		log->current_io = io;
296 	}
297 
298 	for (i = 0; i < sh->disks; i++) {
299 		struct r5dev *dev = &sh->dev[i];
300 
301 		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
302 			if (!data_disks || dev->sector < data_sector)
303 				data_sector = dev->sector;
304 			data_disks++;
305 		}
306 	}
307 	BUG_ON(!data_disks);
308 
309 	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
310 		 io->seq, (unsigned long long)data_sector, data_disks);
311 
312 	pplhdr = page_address(io->header_page);
313 
314 	if (io->entries_count > 0) {
315 		struct ppl_header_entry *last =
316 				&pplhdr->entries[io->entries_count - 1];
317 		struct stripe_head *sh_last = list_last_entry(
318 				&io->stripe_list, struct stripe_head, log_list);
319 		u64 data_sector_last = le64_to_cpu(last->data_sector);
320 		u32 data_size_last = le32_to_cpu(last->data_size);
321 
322 		/*
323 		 * Check if we can append the stripe to the last entry. It must
324 		 * be just after the last logged stripe and write to the same
325 		 * disks. Use bit shift and logarithm to avoid 64-bit division.
326 		 */
327 		if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
328 		    (data_sector >> ilog2(conf->chunk_sectors) ==
329 		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
330 		    ((data_sector - data_sector_last) * data_disks ==
331 		     data_size_last >> 9))
332 			e = last;
333 	}
334 
335 	if (!e) {
336 		e = &pplhdr->entries[io->entries_count++];
337 		e->data_sector = cpu_to_le64(data_sector);
338 		e->parity_disk = cpu_to_le32(sh->pd_idx);
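		/*
		 * Seed the entry checksum with ~0. It is updated
		 * incrementally below as partial parity pages are added and
		 * bit-inverted when the io_unit is submitted in
		 * ppl_submit_iounit().
		 */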
339 		e->checksum = cpu_to_le32(~0);
340 	}
341 
342 	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
343 
344 	/* don't write any PP if full stripe write */
345 	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
346 		le32_add_cpu(&e->pp_size, PAGE_SIZE);
347 		io->pp_size += PAGE_SIZE;
348 		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
349 						    page_address(sh->ppl_page),
350 						    PAGE_SIZE));
351 	}
352 
353 	list_add_tail(&sh->log_list, &io->stripe_list);
354 	atomic_inc(&io->pending_stripes);
355 	sh->ppl_io = io;
356 
357 	return 0;
358 }
359 
360 int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
361 {
362 	struct ppl_conf *ppl_conf = conf->log_private;
363 	struct ppl_io_unit *io = sh->ppl_io;
364 	struct ppl_log *log;
365 
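	/*
	 * Skip logging if the stripe already has an io_unit assigned, is
	 * being synced, has no partial parity page, or if its parity disk
	 * is not being written or is not in sync.
	 */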
366 	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
367 	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
368 	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
369 		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
370 		return -EAGAIN;
371 	}
372 
373 	log = &ppl_conf->child_logs[sh->pd_idx];
374 
375 	mutex_lock(&log->io_mutex);
376 
377 	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
378 		mutex_unlock(&log->io_mutex);
379 		return -EAGAIN;
380 	}
381 
382 	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
383 	clear_bit(STRIPE_DELAYED, &sh->state);
384 	atomic_inc(&sh->count);
385 
386 	if (ppl_log_stripe(log, sh)) {
387 		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
388 		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
389 		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
390 	}
391 
392 	mutex_unlock(&log->io_mutex);
393 
394 	return 0;
395 }
396 
397 static void ppl_log_endio(struct bio *bio)
398 {
399 	struct ppl_io_unit *io = bio->bi_private;
400 	struct ppl_log *log = io->log;
401 	struct ppl_conf *ppl_conf = log->ppl_conf;
402 	struct stripe_head *sh, *next;
403 
404 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
405 
406 	if (bio->bi_status)
407 		md_error(ppl_conf->mddev, log->rdev);
408 
409 	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
410 		list_del_init(&sh->log_list);
411 
412 		set_bit(STRIPE_HANDLE, &sh->state);
413 		raid5_release_stripe(sh);
414 	}
415 }
416 
417 static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
418 {
419 	char b[BDEVNAME_SIZE];
420 
421 	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
422 		 __func__, io->seq, bio->bi_iter.bi_size,
423 		 (unsigned long long)bio->bi_iter.bi_sector,
424 		 bio_devname(bio, b));
425 
426 	submit_bio(bio);
427 }
428 
429 static void ppl_submit_iounit(struct ppl_io_unit *io)
430 {
431 	struct ppl_log *log = io->log;
432 	struct ppl_conf *ppl_conf = log->ppl_conf;
433 	struct ppl_header *pplhdr = page_address(io->header_page);
434 	struct bio *bio = &io->bio;
435 	struct stripe_head *sh;
436 	int i;
437 
438 	bio->bi_private = io;
439 
440 	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
441 		ppl_log_endio(bio);
442 		return;
443 	}
444 
445 	for (i = 0; i < io->entries_count; i++) {
446 		struct ppl_header_entry *e = &pplhdr->entries[i];
447 
448 		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
449 			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
450 			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
451 
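		/*
		 * data_sector is stored on disk in units of
		 * ppl_conf->block_size, so convert it from 512-byte sectors
		 * here, and finalize the entry checksum by inverting the
		 * accumulated crc32c value.
		 */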
452 		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
453 					     ilog2(ppl_conf->block_size >> 9));
454 		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
455 	}
456 
457 	pplhdr->entries_count = cpu_to_le32(io->entries_count);
458 	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
459 
460 	/* Rewind the buffer if the current PPL is larger than the remaining space */
461 	if (log->use_multippl &&
462 	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
463 	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
464 		log->next_io_sector = log->rdev->ppl.sector;
465 
466 
467 	bio->bi_end_io = ppl_log_endio;
468 	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
469 	bio_set_dev(bio, log->rdev->bdev);
470 	bio->bi_iter.bi_sector = log->next_io_sector;
471 	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
472 	bio->bi_write_hint = ppl_conf->write_hint;
473 
474 	pr_debug("%s: log->next_io_sector: %llu\n", __func__,
475 	    (unsigned long long)log->next_io_sector);
476 
477 	if (log->use_multippl)
478 		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
479 
480 	WARN_ON(log->disk_flush_bitmap != 0);
481 
482 	list_for_each_entry(sh, &io->stripe_list, log_list) {
483 		for (i = 0; i < sh->disks; i++) {
484 			struct r5dev *dev = &sh->dev[i];
485 
486 			if ((ppl_conf->child_logs[i].wb_cache_on) &&
487 			    (test_bit(R5_Wantwrite, &dev->flags))) {
488 				set_bit(i, &log->disk_flush_bitmap);
489 			}
490 		}
491 
492 		/* entries for full stripe writes have no partial parity */
493 		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
494 			continue;
495 
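		/*
		 * If the current bio has run out of bvecs, allocate a new
		 * one from the bioset, start it where the previous bio ends,
		 * chain it and submit the previous bio.
		 */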
496 		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
497 			struct bio *prev = bio;
498 
499 			bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
500 					       prev->bi_opf, GFP_NOIO,
501 					       &ppl_conf->bs);
502 			bio->bi_write_hint = prev->bi_write_hint;
503 			bio->bi_iter.bi_sector = bio_end_sector(prev);
504 			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
505 
506 			bio_chain(bio, prev);
507 			ppl_submit_iounit_bio(io, prev);
508 		}
509 	}
510 
511 	ppl_submit_iounit_bio(io, bio);
512 }
513 
514 static void ppl_submit_current_io(struct ppl_log *log)
515 {
516 	struct ppl_io_unit *io;
517 
518 	spin_lock_irq(&log->io_list_lock);
519 
520 	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
521 				      log_sibling);
522 	if (io && io->submitted)
523 		io = NULL;
524 
525 	spin_unlock_irq(&log->io_list_lock);
526 
527 	if (io) {
528 		io->submitted = true;
529 
530 		if (io == log->current_io)
531 			log->current_io = NULL;
532 
533 		ppl_submit_iounit(io);
534 	}
535 }
536 
537 void ppl_write_stripe_run(struct r5conf *conf)
538 {
539 	struct ppl_conf *ppl_conf = conf->log_private;
540 	struct ppl_log *log;
541 	int i;
542 
543 	for (i = 0; i < ppl_conf->count; i++) {
544 		log = &ppl_conf->child_logs[i];
545 
546 		mutex_lock(&log->io_mutex);
547 		ppl_submit_current_io(log);
548 		mutex_unlock(&log->io_mutex);
549 	}
550 }
551 
552 static void ppl_io_unit_finished(struct ppl_io_unit *io)
553 {
554 	struct ppl_log *log = io->log;
555 	struct ppl_conf *ppl_conf = log->ppl_conf;
556 	struct r5conf *conf = ppl_conf->mddev->private;
557 	unsigned long flags;
558 
559 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
560 
561 	local_irq_save(flags);
562 
563 	spin_lock(&log->io_list_lock);
564 	list_del(&io->log_sibling);
565 	spin_unlock(&log->io_list_lock);
566 
567 	mempool_free(io, &ppl_conf->io_pool);
568 
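	/*
	 * An io_unit was just freed, so retry one stripe that previously
	 * failed to allocate one.
	 */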
569 	spin_lock(&ppl_conf->no_mem_stripes_lock);
570 	if (!list_empty(&ppl_conf->no_mem_stripes)) {
571 		struct stripe_head *sh;
572 
573 		sh = list_first_entry(&ppl_conf->no_mem_stripes,
574 				      struct stripe_head, log_list);
575 		list_del_init(&sh->log_list);
576 		set_bit(STRIPE_HANDLE, &sh->state);
577 		raid5_release_stripe(sh);
578 	}
579 	spin_unlock(&ppl_conf->no_mem_stripes_lock);
580 
581 	local_irq_restore(flags);
582 
583 	wake_up(&conf->wait_for_quiescent);
584 }
585 
586 static void ppl_flush_endio(struct bio *bio)
587 {
588 	struct ppl_io_unit *io = bio->bi_private;
589 	struct ppl_log *log = io->log;
590 	struct ppl_conf *ppl_conf = log->ppl_conf;
591 	struct r5conf *conf = ppl_conf->mddev->private;
592 	char b[BDEVNAME_SIZE];
593 
594 	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
595 
596 	if (bio->bi_status) {
597 		struct md_rdev *rdev;
598 
599 		rcu_read_lock();
600 		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
601 		if (rdev)
602 			md_error(rdev->mddev, rdev);
603 		rcu_read_unlock();
604 	}
605 
606 	bio_put(bio);
607 
608 	if (atomic_dec_and_test(&io->pending_flushes)) {
609 		ppl_io_unit_finished(io);
610 		md_wakeup_thread(conf->mddev->thread);
611 	}
612 }
613 
614 static void ppl_do_flush(struct ppl_io_unit *io)
615 {
616 	struct ppl_log *log = io->log;
617 	struct ppl_conf *ppl_conf = log->ppl_conf;
618 	struct r5conf *conf = ppl_conf->mddev->private;
619 	int raid_disks = conf->raid_disks;
620 	int flushed_disks = 0;
621 	int i;
622 
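	/*
	 * Take one reference per raid disk up front. References for disks
	 * that do not get a flush bio are dropped in the loop at the end of
	 * this function, so the io_unit is finished only after all submitted
	 * flushes have completed (see ppl_flush_endio()).
	 */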
623 	atomic_set(&io->pending_flushes, raid_disks);
624 
625 	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
626 		struct md_rdev *rdev;
627 		struct block_device *bdev = NULL;
628 
629 		rcu_read_lock();
630 		rdev = rcu_dereference(conf->disks[i].rdev);
631 		if (rdev && !test_bit(Faulty, &rdev->flags))
632 			bdev = rdev->bdev;
633 		rcu_read_unlock();
634 
635 		if (bdev) {
636 			struct bio *bio;
637 			char b[BDEVNAME_SIZE];
638 
639 			bio = bio_alloc_bioset(bdev, 0,
640 					       REQ_OP_WRITE | REQ_PREFLUSH,
641 					       GFP_NOIO, &ppl_conf->flush_bs);
642 			bio->bi_private = io;
643 			bio->bi_end_io = ppl_flush_endio;
644 
645 			pr_debug("%s: dev: %s\n", __func__,
646 				 bio_devname(bio, b));
647 
648 			submit_bio(bio);
649 			flushed_disks++;
650 		}
651 	}
652 
653 	log->disk_flush_bitmap = 0;
654 
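	/* drop the references taken for disks that did not get a flush bio */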
655 	for (i = flushed_disks ; i < raid_disks; i++) {
656 		if (atomic_dec_and_test(&io->pending_flushes))
657 			ppl_io_unit_finished(io);
658 	}
659 }
660 
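/*
 * Finished io_units are removed from log->io_list, so if the oldest io_unit
 * on the list has not been submitted yet, no PPL write is in flight for this
 * log. Used by ppl_quiesce() to wait for outstanding PPL I/O to complete.
 */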
661 static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
662 					    struct ppl_log *log)
663 {
664 	struct ppl_io_unit *io;
665 
666 	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
667 				      log_sibling);
668 
669 	return !io || !io->submitted;
670 }
671 
672 void ppl_quiesce(struct r5conf *conf, int quiesce)
673 {
674 	struct ppl_conf *ppl_conf = conf->log_private;
675 	int i;
676 
677 	if (quiesce) {
678 		for (i = 0; i < ppl_conf->count; i++) {
679 			struct ppl_log *log = &ppl_conf->child_logs[i];
680 
681 			spin_lock_irq(&log->io_list_lock);
682 			wait_event_lock_irq(conf->wait_for_quiescent,
683 					    ppl_no_io_unit_submitted(conf, log),
684 					    log->io_list_lock);
685 			spin_unlock_irq(&log->io_list_lock);
686 		}
687 	}
688 }
689 
690 int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
691 {
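	/*
	 * An empty flush bio is completed here. A flush bio with data is
	 * handed back to raid5 with REQ_PREFLUSH cleared (-EAGAIN) and is
	 * processed as a regular write.
	 */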
692 	if (bio->bi_iter.bi_size == 0) {
693 		bio_endio(bio);
694 		return 0;
695 	}
696 	bio->bi_opf &= ~REQ_PREFLUSH;
697 	return -EAGAIN;
698 }
699 
700 void ppl_stripe_write_finished(struct stripe_head *sh)
701 {
702 	struct ppl_io_unit *io;
703 
704 	io = sh->ppl_io;
705 	sh->ppl_io = NULL;
706 
707 	if (io && atomic_dec_and_test(&io->pending_stripes)) {
708 		if (io->log->disk_flush_bitmap)
709 			ppl_do_flush(io);
710 		else
711 			ppl_io_unit_finished(io);
712 	}
713 }
714 
715 static void ppl_xor(int size, struct page *page1, struct page *page2)
716 {
717 	struct async_submit_ctl submit;
718 	struct dma_async_tx_descriptor *tx;
719 	struct page *xor_srcs[] = { page1, page2 };
720 
721 	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
722 			  NULL, NULL, NULL, NULL);
723 	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
724 
725 	async_tx_quiesce(&tx);
726 }
727 
728 /*
729  * PPL recovery strategy: xor partial parity and data from all modified data
730  * disks within a stripe and write the result as the new stripe parity. If all
731  * stripe data disks are modified (full stripe write), no partial parity is
732  * available, so just xor the data disks.
733  *
734  * Recovery of a PPL entry shall occur only if all modified data disks are
735  * available and read from all of them succeeds.
736  *
737  * A PPL entry applies to a stripe, partial parity size for an entry is at most
738  * the size of the chunk. Examples of possible cases for a single entry:
739  *
740  * case 0: single data disk write:
741  *   data0    data1    data2     ppl        parity
742  * +--------+--------+--------+           +--------------------+
743  * | ------ | ------ | ------ | +----+    | (no change)        |
744  * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
745  * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
746  * | ------ | ------ | ------ | +----+    | (no change)        |
747  * +--------+--------+--------+           +--------------------+
748  * pp_size = data_size
749  *
750  * case 1: more than one data disk write:
751  *   data0    data1    data2     ppl        parity
752  * +--------+--------+--------+           +--------------------+
753  * | ------ | ------ | ------ | +----+    | (no change)        |
754  * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
755  * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
756  * | ------ | ------ | ------ | +----+    | (no change)        |
757  * +--------+--------+--------+           +--------------------+
758  * pp_size = data_size / modified_data_disks
759  *
760  * case 2: write to all data disks (also full stripe write):
761  *   data0    data1    data2                parity
762  * +--------+--------+--------+           +--------------------+
763  * | ------ | ------ | ------ |           | (no change)        |
764  * | -data- | -data- | -data- | --------> | xor all data       |
765  * | ------ | ------ | ------ | --------> | (no change)        |
766  * | ------ | ------ | ------ |           | (no change)        |
767  * +--------+--------+--------+           +--------------------+
768  * pp_size = 0
769  *
770  * The following cases are possible only in other implementations. The recovery
771  * code can handle them, but they are not generated at runtime because they can
772  * be reduced to cases 0, 1 and 2:
773  *
774  * case 3:
775  *   data0    data1    data2     ppl        parity
776  * +--------+--------+--------+ +----+    +--------------------+
777  * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
778  * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
779  * | -data- | -data- | -data- | | -- | -> | xor all data       |
780  * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
781  * +--------+--------+--------+ +----+    +--------------------+
782  * pp_size = chunk_size
783  *
784  * case 4:
785  *   data0    data1    data2     ppl        parity
786  * +--------+--------+--------+ +----+    +--------------------+
787  * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
788  * | ------ | ------ | ------ | | -- | -> | (no change)        |
789  * | ------ | ------ | ------ | | -- | -> | (no change)        |
790  * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
791  * +--------+--------+--------+ +----+    +--------------------+
792  * pp_size = chunk_size
793  */
794 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
795 			     sector_t ppl_sector)
796 {
797 	struct ppl_conf *ppl_conf = log->ppl_conf;
798 	struct mddev *mddev = ppl_conf->mddev;
799 	struct r5conf *conf = mddev->private;
800 	int block_size = ppl_conf->block_size;
801 	struct page *page1;
802 	struct page *page2;
803 	sector_t r_sector_first;
804 	sector_t r_sector_last;
805 	int strip_sectors;
806 	int data_disks;
807 	int i;
808 	int ret = 0;
809 	char b[BDEVNAME_SIZE];
810 	unsigned int pp_size = le32_to_cpu(e->pp_size);
811 	unsigned int data_size = le32_to_cpu(e->data_size);
812 
813 	page1 = alloc_page(GFP_KERNEL);
814 	page2 = alloc_page(GFP_KERNEL);
815 
816 	if (!page1 || !page2) {
817 		ret = -ENOMEM;
818 		goto out;
819 	}
820 
821 	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
822 
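	/*
	 * Determine the range of array sectors described by this entry and
	 * the number of data disks it spans (see the cases illustrated
	 * above).
	 */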
823 	if ((pp_size >> 9) < conf->chunk_sectors) {
824 		if (pp_size > 0) {
825 			data_disks = data_size / pp_size;
826 			strip_sectors = pp_size >> 9;
827 		} else {
828 			data_disks = conf->raid_disks - conf->max_degraded;
829 			strip_sectors = (data_size >> 9) / data_disks;
830 		}
831 		r_sector_last = r_sector_first +
832 				(data_disks - 1) * conf->chunk_sectors +
833 				strip_sectors;
834 	} else {
835 		data_disks = conf->raid_disks - conf->max_degraded;
836 		strip_sectors = conf->chunk_sectors;
837 		r_sector_last = r_sector_first + (data_size >> 9);
838 	}
839 
840 	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
841 		 (unsigned long long)r_sector_first,
842 		 (unsigned long long)r_sector_last);
843 
844 	/* if start and end are 4k aligned, use a 4k block */
845 	if (block_size == 512 &&
846 	    (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
847 	    (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
848 		block_size = RAID5_STRIPE_SIZE(conf);
849 
850 	/* iterate through blocks in strip */
851 	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
852 		bool update_parity = false;
853 		sector_t parity_sector;
854 		struct md_rdev *parity_rdev;
855 		struct stripe_head sh;
856 		int disk;
857 		int indent = 0;
858 
859 		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
860 		indent += 2;
861 
862 		memset(page_address(page1), 0, PAGE_SIZE);
863 
864 		/* iterate through data member disks */
865 		for (disk = 0; disk < data_disks; disk++) {
866 			int dd_idx;
867 			struct md_rdev *rdev;
868 			sector_t sector;
869 			sector_t r_sector = r_sector_first + i +
870 					    (disk * conf->chunk_sectors);
871 
872 			pr_debug("%s:%*s data member disk %d start\n",
873 				 __func__, indent, "", disk);
874 			indent += 2;
875 
876 			if (r_sector >= r_sector_last) {
877 				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
878 					 __func__, indent, "",
879 					 (unsigned long long)r_sector);
880 				indent -= 2;
881 				continue;
882 			}
883 
884 			update_parity = true;
885 
886 			/* map raid sector to member disk */
887 			sector = raid5_compute_sector(conf, r_sector, 0,
888 						      &dd_idx, NULL);
889 			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
890 				 __func__, indent, "",
891 				 (unsigned long long)r_sector, dd_idx,
892 				 (unsigned long long)sector);
893 
894 			rdev = conf->disks[dd_idx].rdev;
895 			if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
896 				      sector >= rdev->recovery_offset)) {
897 				pr_debug("%s:%*s data member disk %d missing\n",
898 					 __func__, indent, "", dd_idx);
899 				update_parity = false;
900 				break;
901 			}
902 
903 			pr_debug("%s:%*s reading data member disk %s sector %llu\n",
904 				 __func__, indent, "", bdevname(rdev->bdev, b),
905 				 (unsigned long long)sector);
906 			if (!sync_page_io(rdev, sector, block_size, page2,
907 					REQ_OP_READ, 0, false)) {
908 				md_error(mddev, rdev);
909 				pr_debug("%s:%*s read failed!\n", __func__,
910 					 indent, "");
911 				ret = -EIO;
912 				goto out;
913 			}
914 
915 			ppl_xor(block_size, page1, page2);
916 
917 			indent -= 2;
918 		}
919 
920 		if (!update_parity)
921 			continue;
922 
923 		if (pp_size > 0) {
924 			pr_debug("%s:%*s reading pp disk sector %llu\n",
925 				 __func__, indent, "",
926 				 (unsigned long long)(ppl_sector + i));
927 			if (!sync_page_io(log->rdev,
928 					ppl_sector - log->rdev->data_offset + i,
929 					block_size, page2, REQ_OP_READ, 0,
930 					false)) {
931 				pr_debug("%s:%*s read failed!\n", __func__,
932 					 indent, "");
933 				md_error(mddev, log->rdev);
934 				ret = -EIO;
935 				goto out;
936 			}
937 
938 			ppl_xor(block_size, page1, page2);
939 		}
940 
941 		/* map raid sector to parity disk */
942 		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
943 				0, &disk, &sh);
944 		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
945 		parity_rdev = conf->disks[sh.pd_idx].rdev;
946 
947 		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
948 		pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
949 			 __func__, indent, "",
950 			 (unsigned long long)parity_sector,
951 			 bdevname(parity_rdev->bdev, b));
952 		if (!sync_page_io(parity_rdev, parity_sector, block_size,
953 				page1, REQ_OP_WRITE, 0, false)) {
954 			pr_debug("%s:%*s parity write error!\n", __func__,
955 				 indent, "");
956 			md_error(mddev, parity_rdev);
957 			ret = -EIO;
958 			goto out;
959 		}
960 	}
961 out:
962 	if (page1)
963 		__free_page(page1);
964 	if (page2)
965 		__free_page(page2);
966 	return ret;
967 }
968 
969 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
970 		       sector_t offset)
971 {
972 	struct ppl_conf *ppl_conf = log->ppl_conf;
973 	struct md_rdev *rdev = log->rdev;
974 	struct mddev *mddev = rdev->mddev;
975 	sector_t ppl_sector = rdev->ppl.sector + offset +
976 			      (PPL_HEADER_SIZE >> 9);
977 	struct page *page;
978 	int i;
979 	int ret = 0;
980 
981 	page = alloc_page(GFP_KERNEL);
982 	if (!page)
983 		return -ENOMEM;
984 
985 	/* iterate through all PPL entries saved */
986 	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
987 		struct ppl_header_entry *e = &pplhdr->entries[i];
988 		u32 pp_size = le32_to_cpu(e->pp_size);
989 		sector_t sector = ppl_sector;
990 		int ppl_entry_sectors = pp_size >> 9;
991 		u32 crc, crc_stored;
992 
993 		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
994 			 __func__, rdev->raid_disk, i,
995 			 (unsigned long long)ppl_sector, pp_size);
996 
997 		crc = ~0;
998 		crc_stored = le32_to_cpu(e->checksum);
999 
1000 		/* read partial parity for this entry and calculate its checksum */
1001 		while (pp_size) {
1002 			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
1003 
1004 			if (!sync_page_io(rdev, sector - rdev->data_offset,
1005 					s, page, REQ_OP_READ, 0, false)) {
1006 				md_error(mddev, rdev);
1007 				ret = -EIO;
1008 				goto out;
1009 			}
1010 
1011 			crc = crc32c_le(crc, page_address(page), s);
1012 
1013 			pp_size -= s;
1014 			sector += s >> 9;
1015 		}
1016 
1017 		crc = ~crc;
1018 
1019 		if (crc != crc_stored) {
1020 			/*
1021 			 * Don't recover this entry if the checksum does not
1022 			 * match, but keep going and try to recover other
1023 			 * entries.
1024 			 */
1025 			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1026 				 __func__, crc_stored, crc);
1027 			ppl_conf->mismatch_count++;
1028 		} else {
1029 			ret = ppl_recover_entry(log, e, ppl_sector);
1030 			if (ret)
1031 				goto out;
1032 			ppl_conf->recovered_entries++;
1033 		}
1034 
1035 		ppl_sector += ppl_entry_sectors;
1036 	}
1037 
1038 	/* flush the disk cache after recovery if necessary */
1039 	ret = blkdev_issue_flush(rdev->bdev);
1040 out:
1041 	__free_page(page);
1042 	return ret;
1043 }
1044 
1045 static int ppl_write_empty_header(struct ppl_log *log)
1046 {
1047 	struct page *page;
1048 	struct ppl_header *pplhdr;
1049 	struct md_rdev *rdev = log->rdev;
1050 	int ret = 0;
1051 
1052 	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
1053 		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1054 
1055 	page = alloc_page(GFP_NOIO | __GFP_ZERO);
1056 	if (!page)
1057 		return -ENOMEM;
1058 
1059 	pplhdr = page_address(page);
1060 	/* zero out PPL space to avoid collision with old PPLs */
1061 	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1062 			    log->rdev->ppl.size, GFP_NOIO, 0);
1063 	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
1064 	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1065 	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
1066 
1067 	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1068 			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
1069 			  REQ_FUA, 0, false)) {
1070 		md_error(rdev->mddev, rdev);
1071 		ret = -EIO;
1072 	}
1073 
1074 	__free_page(page);
1075 	return ret;
1076 }
1077 
1078 static int ppl_load_distributed(struct ppl_log *log)
1079 {
1080 	struct ppl_conf *ppl_conf = log->ppl_conf;
1081 	struct md_rdev *rdev = log->rdev;
1082 	struct mddev *mddev = rdev->mddev;
1083 	struct page *page, *page2;
1084 	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
1085 	u32 crc, crc_stored;
1086 	u32 signature;
1087 	int ret = 0, i;
1088 	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
1089 
1090 	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
1091 	/* read PPL headers, find the recent one */
1092 	page = alloc_page(GFP_KERNEL);
1093 	if (!page)
1094 		return -ENOMEM;
1095 
1096 	page2 = alloc_page(GFP_KERNEL);
1097 	if (!page2) {
1098 		__free_page(page);
1099 		return -ENOMEM;
1100 	}
1101 
1102 	/* search the PPL area for the latest PPL */
1103 	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
1104 		if (!sync_page_io(rdev,
1105 				  rdev->ppl.sector - rdev->data_offset +
1106 				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
1107 				  0, false)) {
1108 			md_error(mddev, rdev);
1109 			ret = -EIO;
1110 			/* if not able to read - don't recover any PPL */
1111 			pplhdr = NULL;
1112 			break;
1113 		}
1114 		pplhdr = page_address(page);
1115 
1116 		/* check header validity */
1117 		crc_stored = le32_to_cpu(pplhdr->checksum);
1118 		pplhdr->checksum = 0;
1119 		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
1120 
1121 		if (crc_stored != crc) {
1122 			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
1123 				 __func__, crc_stored, crc,
1124 				 (unsigned long long)pplhdr_offset);
1125 			pplhdr = prev_pplhdr;
1126 			pplhdr_offset = prev_pplhdr_offset;
1127 			break;
1128 		}
1129 
1130 		signature = le32_to_cpu(pplhdr->signature);
1131 
1132 		if (mddev->external) {
1133 			/*
1134 			 * For external metadata the header signature is set and
1135 			 * validated in userspace.
1136 			 */
1137 			ppl_conf->signature = signature;
1138 		} else if (ppl_conf->signature != signature) {
1139 			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1140 				 __func__, signature, ppl_conf->signature,
1141 				 (unsigned long long)pplhdr_offset);
1142 			pplhdr = prev_pplhdr;
1143 			pplhdr_offset = prev_pplhdr_offset;
1144 			break;
1145 		}
1146 
1147 		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1148 		    le64_to_cpu(pplhdr->generation)) {
1149 			/* previous was newest */
1150 			pplhdr = prev_pplhdr;
1151 			pplhdr_offset = prev_pplhdr_offset;
1152 			break;
1153 		}
1154 
1155 		prev_pplhdr_offset = pplhdr_offset;
1156 		prev_pplhdr = pplhdr;
1157 
1158 		swap(page, page2);
1159 
1160 		/* calculate next potential ppl offset */
1161 		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1162 			pplhdr_offset +=
1163 			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1164 		pplhdr_offset += PPL_HEADER_SIZE >> 9;
1165 	}
1166 
1167 	/* no valid ppl found */
1168 	if (!pplhdr)
1169 		ppl_conf->mismatch_count++;
1170 	else
1171 		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1172 		    __func__, (unsigned long long)pplhdr_offset,
1173 		    le64_to_cpu(pplhdr->generation));
1174 
1175 	/* attempt to recover from log if we are starting a dirty array */
1176 	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1177 		ret = ppl_recover(log, pplhdr, pplhdr_offset);
1178 
1179 	/* write empty header if we are starting the array */
1180 	if (!ret && !mddev->pers)
1181 		ret = ppl_write_empty_header(log);
1182 
1183 	__free_page(page);
1184 	__free_page(page2);
1185 
1186 	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1187 		 __func__, ret, ppl_conf->mismatch_count,
1188 		 ppl_conf->recovered_entries);
1189 	return ret;
1190 }
1191 
1192 static int ppl_load(struct ppl_conf *ppl_conf)
1193 {
1194 	int ret = 0;
1195 	u32 signature = 0;
1196 	bool signature_set = false;
1197 	int i;
1198 
1199 	for (i = 0; i < ppl_conf->count; i++) {
1200 		struct ppl_log *log = &ppl_conf->child_logs[i];
1201 
1202 		/* skip missing drive */
1203 		if (!log->rdev)
1204 			continue;
1205 
1206 		ret = ppl_load_distributed(log);
1207 		if (ret)
1208 			break;
1209 
1210 		/*
1211 		 * For external metadata we can't check if the signature is
1212 		 * correct on a single drive, but we can check if it is the same
1213 		 * on all drives.
1214 		 */
1215 		if (ppl_conf->mddev->external) {
1216 			if (!signature_set) {
1217 				signature = ppl_conf->signature;
1218 				signature_set = true;
1219 			} else if (signature != ppl_conf->signature) {
1220 				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1221 					mdname(ppl_conf->mddev));
1222 				ret = -EINVAL;
1223 				break;
1224 			}
1225 		}
1226 	}
1227 
1228 	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1229 		 __func__, ret, ppl_conf->mismatch_count,
1230 		 ppl_conf->recovered_entries);
1231 	return ret;
1232 }
1233 
1234 static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1235 {
1236 	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1237 	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1238 
1239 	kfree(ppl_conf->child_logs);
1240 
1241 	bioset_exit(&ppl_conf->bs);
1242 	bioset_exit(&ppl_conf->flush_bs);
1243 	mempool_exit(&ppl_conf->io_pool);
1244 	kmem_cache_destroy(ppl_conf->io_kc);
1245 
1246 	kfree(ppl_conf);
1247 }
1248 
1249 void ppl_exit_log(struct r5conf *conf)
1250 {
1251 	struct ppl_conf *ppl_conf = conf->log_private;
1252 
1253 	if (ppl_conf) {
1254 		__ppl_exit_log(ppl_conf);
1255 		conf->log_private = NULL;
1256 	}
1257 }
1258 
1259 static int ppl_validate_rdev(struct md_rdev *rdev)
1260 {
1261 	char b[BDEVNAME_SIZE];
1262 	int ppl_data_sectors;
1263 	int ppl_size_new;
1264 
1265 	/*
1266 	 * The configured PPL size must be enough to store
1267 	 * the header and (at the very least) partial parity
1268 	 * for one stripe. Round it down to ensure the data
1269 	 * space is cleanly divisible by stripe size.
1270 	 */
1271 	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1272 
1273 	if (ppl_data_sectors > 0)
1274 		ppl_data_sectors = rounddown(ppl_data_sectors,
1275 				RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
1276 
1277 	if (ppl_data_sectors <= 0) {
1278 		pr_warn("md/raid:%s: PPL space too small on %s\n",
1279 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1280 		return -ENOSPC;
1281 	}
1282 
1283 	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1284 
1285 	if ((rdev->ppl.sector < rdev->data_offset &&
1286 	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1287 	    (rdev->ppl.sector >= rdev->data_offset &&
1288 	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1289 		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
1290 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1291 		return -EINVAL;
1292 	}
1293 
1294 	if (!rdev->mddev->external &&
1295 	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1296 	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1297 		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
1298 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1299 		return -EINVAL;
1300 	}
1301 
1302 	rdev->ppl.size = ppl_size_new;
1303 
1304 	return 0;
1305 }
1306 
1307 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1308 {
1309 	struct request_queue *q;
1310 
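	/*
	 * Use multiple PPLs, written one after another through the PPL
	 * area, only if the area is large enough to hold at least two of
	 * them (header + PPL_SPACE_SIZE each); otherwise a single PPL uses
	 * all of the configured space.
	 */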
1311 	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1312 				      PPL_HEADER_SIZE) * 2) {
1313 		log->use_multippl = true;
1314 		set_bit(MD_HAS_MULTIPLE_PPLS,
1315 			&log->ppl_conf->mddev->flags);
1316 		log->entry_space = PPL_SPACE_SIZE;
1317 	} else {
1318 		log->use_multippl = false;
1319 		log->entry_space = (log->rdev->ppl.size << 9) -
1320 				   PPL_HEADER_SIZE;
1321 	}
1322 	log->next_io_sector = rdev->ppl.sector;
1323 
1324 	q = bdev_get_queue(rdev->bdev);
1325 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1326 		log->wb_cache_on = true;
1327 }
1328 
1329 int ppl_init_log(struct r5conf *conf)
1330 {
1331 	struct ppl_conf *ppl_conf;
1332 	struct mddev *mddev = conf->mddev;
1333 	int ret = 0;
1334 	int max_disks;
1335 	int i;
1336 
1337 	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1338 		 mdname(conf->mddev));
1339 
1340 	if (PAGE_SIZE != 4096)
1341 		return -EINVAL;
1342 
1343 	if (mddev->level != 5) {
1344 		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1345 			mdname(mddev), mddev->level);
1346 		return -EINVAL;
1347 	}
1348 
1349 	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1350 		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1351 			mdname(mddev));
1352 		return -EINVAL;
1353 	}
1354 
1355 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1356 		pr_warn("md/raid:%s PPL is not compatible with journal\n",
1357 			mdname(mddev));
1358 		return -EINVAL;
1359 	}
1360 
1361 	max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
1362 		BITS_PER_BYTE;
1363 	if (conf->raid_disks > max_disks) {
1364 		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
1365 			mdname(mddev), max_disks);
1366 		return -EINVAL;
1367 	}
1368 
1369 	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1370 	if (!ppl_conf)
1371 		return -ENOMEM;
1372 
1373 	ppl_conf->mddev = mddev;
1374 
1375 	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1376 	if (!ppl_conf->io_kc) {
1377 		ret = -ENOMEM;
1378 		goto err;
1379 	}
1380 
1381 	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1382 			   ppl_io_pool_free, ppl_conf->io_kc);
1383 	if (ret)
1384 		goto err;
1385 
1386 	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1387 	if (ret)
1388 		goto err;
1389 
1390 	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1391 	if (ret)
1392 		goto err;
1393 
1394 	ppl_conf->count = conf->raid_disks;
1395 	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1396 				       GFP_KERNEL);
1397 	if (!ppl_conf->child_logs) {
1398 		ret = -ENOMEM;
1399 		goto err;
1400 	}
1401 
1402 	atomic64_set(&ppl_conf->seq, 0);
1403 	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1404 	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1405 	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
1406 
1407 	if (!mddev->external) {
1408 		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1409 		ppl_conf->block_size = 512;
1410 	} else {
1411 		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1412 	}
1413 
1414 	for (i = 0; i < ppl_conf->count; i++) {
1415 		struct ppl_log *log = &ppl_conf->child_logs[i];
1416 		struct md_rdev *rdev = conf->disks[i].rdev;
1417 
1418 		mutex_init(&log->io_mutex);
1419 		spin_lock_init(&log->io_list_lock);
1420 		INIT_LIST_HEAD(&log->io_list);
1421 
1422 		log->ppl_conf = ppl_conf;
1423 		log->rdev = rdev;
1424 
1425 		if (rdev) {
1426 			ret = ppl_validate_rdev(rdev);
1427 			if (ret)
1428 				goto err;
1429 
1430 			ppl_init_child_log(log, rdev);
1431 		}
1432 	}
1433 
1434 	/* load and possibly recover the logs from the member disks */
1435 	ret = ppl_load(ppl_conf);
1436 
1437 	if (ret) {
1438 		goto err;
1439 	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
1440 		   ppl_conf->recovered_entries > 0 &&
1441 		   ppl_conf->mismatch_count == 0) {
1442 		/*
1443 		 * If we are starting a dirty array and the recovery succeeds
1444 		 * without any issues, set the array as clean.
1445 		 */
1446 		mddev->recovery_cp = MaxSector;
1447 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1448 	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1449 		/* no mismatch allowed when enabling PPL for a running array */
1450 		ret = -EINVAL;
1451 		goto err;
1452 	}
1453 
1454 	conf->log_private = ppl_conf;
1455 	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1456 
1457 	return 0;
1458 err:
1459 	__ppl_exit_log(ppl_conf);
1460 	return ret;
1461 }
1462 
1463 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1464 {
1465 	struct ppl_conf *ppl_conf = conf->log_private;
1466 	struct ppl_log *log;
1467 	int ret = 0;
1468 	char b[BDEVNAME_SIZE];
1469 
1470 	if (!rdev)
1471 		return -EINVAL;
1472 
1473 	pr_debug("%s: disk: %d operation: %s dev: %s\n",
1474 		 __func__, rdev->raid_disk, add ? "add" : "remove",
1475 		 bdevname(rdev->bdev, b));
1476 
1477 	if (rdev->raid_disk < 0)
1478 		return 0;
1479 
1480 	if (rdev->raid_disk >= ppl_conf->count)
1481 		return -ENODEV;
1482 
1483 	log = &ppl_conf->child_logs[rdev->raid_disk];
1484 
1485 	mutex_lock(&log->io_mutex);
1486 	if (add) {
1487 		ret = ppl_validate_rdev(rdev);
1488 		if (!ret) {
1489 			log->rdev = rdev;
1490 			ret = ppl_write_empty_header(log);
1491 			ppl_init_child_log(log, rdev);
1492 		}
1493 	} else {
1494 		log->rdev = NULL;
1495 	}
1496 	mutex_unlock(&log->io_mutex);
1497 
1498 	return ret;
1499 }
1500 
1501 static ssize_t
1502 ppl_write_hint_show(struct mddev *mddev, char *buf)
1503 {
1504 	size_t ret = 0;
1505 	struct r5conf *conf;
1506 	struct ppl_conf *ppl_conf = NULL;
1507 
1508 	spin_lock(&mddev->lock);
1509 	conf = mddev->private;
1510 	if (conf && raid5_has_ppl(conf))
1511 		ppl_conf = conf->log_private;
1512 	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
1513 	spin_unlock(&mddev->lock);
1514 
1515 	return ret;
1516 }
1517 
1518 static ssize_t
1519 ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
1520 {
1521 	struct r5conf *conf;
1522 	struct ppl_conf *ppl_conf;
1523 	int err = 0;
1524 	unsigned short new;
1525 
1526 	if (len >= PAGE_SIZE)
1527 		return -EINVAL;
1528 	if (kstrtou16(page, 10, &new))
1529 		return -EINVAL;
1530 
1531 	err = mddev_lock(mddev);
1532 	if (err)
1533 		return err;
1534 
1535 	conf = mddev->private;
1536 	if (!conf) {
1537 		err = -ENODEV;
1538 	} else if (raid5_has_ppl(conf)) {
1539 		ppl_conf = conf->log_private;
1540 		if (!ppl_conf)
1541 			err = -EINVAL;
1542 		else
1543 			ppl_conf->write_hint = new;
1544 	} else {
1545 		err = -EINVAL;
1546 	}
1547 
1548 	mddev_unlock(mddev);
1549 
1550 	return err ?: len;
1551 }
1552 
1553 struct md_sysfs_entry
1554 ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
1555 			ppl_write_hint_show,
1556 			ppl_write_hint_store);
1557