xref: /linux/drivers/md/raid1-10.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
#define MAX_PLUG_BIO 32

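/*
 * Illustrative sketch (not part of this file): callers that keep per-device
 * arrays of bio pointers can use BIO_SPECIAL() to skip the IO_BLOCKED and
 * IO_MADE_GOOD sentinels before dereferencing. The r1_bio structure and
 * field names below are hypothetical.
 *
 *	struct bio *b = r1_bio->bios[disk];
 *
 *	if (!b || BIO_SPECIAL(b))
 *		continue;	// sentinel or unused slot, not a real bio
 *	bio_put(b);		// safe: 'b' points at a real bio
 */
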
/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;
	struct page	*pages[RESYNC_PAGES];
};

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	unsigned int		count;
};

static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}
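
/*
 * Illustrative sketch (not part of this file): a resync buffer typically
 * pairs resync_alloc_pages() with resync_free_pages(); the names below are
 * hypothetical.
 *
 *	struct resync_pages rp = { .raid_bio = some_r1bio };
 *
 *	if (resync_alloc_pages(&rp, GFP_NOIO))
 *		return NULL;		// nothing allocated, nothing to undo
 *	...
 *	resync_free_pages(&rp);		// drop the page references again
 */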

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/* generally called after bio_reset() for resetting the bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
			       int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return;
		}

		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
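
/*
 * Illustrative sketch (not part of this file): after bio_reset() wipes the
 * bvec table, the pages cached in 'struct resync_pages' can be re-attached
 * so the bio can be reused, e.g. to resubmit a resync read as a write.
 * Names are hypothetical.
 *
 *	struct resync_pages *rp = get_resync_pages(bio);
 *
 *	bio_reset(bio, rdev->bdev, REQ_OP_WRITE);
 *	bio->bi_private = rp;			// bio_reset() cleared it
 *	md_bio_reset_resync_pages(bio, rp, size);
 */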

static inline void raid1_submit_write(struct bio *bio)
{
	struct md_rdev *rdev = (void *)bio->bi_bdev;

	bio->bi_next = NULL;
	bio_set_dev(bio, rdev->bdev);
	if (test_bit(Faulty, &rdev->flags))
		bio_io_error(bio);
	else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
			  !bdev_max_discard_sectors(bio->bi_bdev)))
		/* Just ignore it */
		bio_endio(bio);
	else
		submit_bio_noacct(bio);
}
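
/*
 * Illustrative sketch (not part of this file): raid1_submit_write() expects
 * the caller to have stashed the target rdev in ->bi_bdev, which is why it
 * casts the field back and then installs the real block device with
 * bio_set_dev(). A hypothetical caller:
 *
 *	mbio->bi_bdev = (struct block_device *)rdev;	// borrowed as an rdev slot
 *	raid1_add_bio_to_plug(mddev, mbio, unplug_fn, disks);
 */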

static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
				      blk_plug_cb_fn unplug, int copies)
{
	struct raid1_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	/*
	 * If bitmap is not enabled, it's safe to submit the io directly, and
	 * this can get optimal performance.
	 */
	if (!md_bitmap_enabled(mddev, true)) {
		raid1_submit_write(bio);
		return true;
	}

	cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
	if (!cb)
		return false;

	plug = container_of(cb, struct raid1_plug_cb, cb);
	bio_list_add(&plug->pending, bio);
	if (++plug->count / MAX_PLUG_BIO >= copies) {
		list_del(&cb->list);
		cb->callback(cb, false);
	}

	return true;
}
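
/*
 * Illustrative sketch (not part of this file): the 'unplug' callback is
 * expected to drain plug->pending, typically via raid1_submit_write(), and
 * to free the plug structure. A simplified hypothetical callback:
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct raid1_plug_cb *plug =
 *			container_of(cb, struct raid1_plug_cb, cb);
 *		struct bio *bio;
 *
 *		raid1_prepare_flush_writes(plug->cb.data);
 *		while ((bio = bio_list_pop(&plug->pending)))
 *			raid1_submit_write(bio);
 *		kfree(plug);
 *	}
 */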

/*
 * current->bio_list will be set under the submit_bio() context; in that case
 * bitmap IO will be added to the list and will wait for the current IO
 * submission to finish, while the current IO submission must wait for the
 * bitmap IO to be done. To avoid such a deadlock, submit the bitmap IO
 * asynchronously.
 */
static inline void raid1_prepare_flush_writes(struct mddev *mddev)
{
	mddev->bitmap_ops->unplug(mddev, current->bio_list == NULL);
}
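
/*
 * Illustrative sketch (not part of this file): a write-out path would call
 * raid1_prepare_flush_writes() once before walking its queued bios, so the
 * bitmap is flushed ahead of the data writes. Hypothetical helper:
 *
 *	static void flush_bio_list(struct mddev *mddev, struct bio *bio)
 *	{
 *		raid1_prepare_flush_writes(mddev);
 *		while (bio) {			// bios chained via bi_next
 *			struct bio *next = bio->bi_next;
 *
 *			raid1_submit_write(bio);
 *			bio = next;
 *		}
 *	}
 */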

/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 */
static inline void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	long cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	cur_time_mon = ktime_get_seconds();

	if (rdev->last_read_error == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (long)(cur_time_mon -
			    rdev->last_read_error) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
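
/*
 * Worked example: with read_errors == 40 and three full hours since the last
 * recorded error, hours_since_last is 3 and the count decays to 40 >> 3 == 5.
 * Once hours_since_last reaches 32 (8 * sizeof(unsigned int) bits), the count
 * is simply reset to 0 to avoid an out-of-range shift.
 */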

static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int read_errors;

	check_decay_read_errors(mddev, rdev);
	read_errors = atomic_inc_return(&rdev->read_errors);
	if (read_errors > max_read_errors) {
		pr_notice("md/"RAID_1_10_NAME":%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
			  mdname(mddev), rdev->bdev, read_errors, max_read_errors);
		pr_notice("md/"RAID_1_10_NAME":%s: %pg: Failing raid device\n",
			  mdname(mddev), rdev->bdev);
		md_error(mddev, rdev);
		return true;
	}

	return false;
}

/**
 * raid1_check_read_range() - check a given read range for bad blocks,
 * the available read length is returned;
 * @rdev: the rdev to read;
 * @this_sector: read position;
 * @len: read length;
 *
 * helper function for read_balance()
 *
 * 1) If there are no bad blocks in the range, @len is returned;
 * 2) If the range is all bad blocks, 0 is returned;
 * 3) If there are partial bad blocks:
 *  - If the bad block range starts after @this_sector, the length of the
 *  first good region is returned;
 *  - If the bad block range starts before @this_sector, 0 is returned and
 *  @len is updated to the number of bad sectors until we reach the
 *  good blocks;
 */
static inline int raid1_check_read_range(struct md_rdev *rdev,
					 sector_t this_sector, int *len)
{
	sector_t first_bad;
	sector_t bad_sectors;

	/* no bad block overlap */
	if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
		return *len;

	/*
	 * bad block range starts offset into our range so we can return the
	 * number of sectors before the bad blocks start.
	 */
	if (first_bad > this_sector)
		return first_bad - this_sector;

	/* read range is fully consumed by bad blocks. */
	if (this_sector + *len <= first_bad + bad_sectors)
		return 0;

	/*
	 * final case, bad block range starts before or at the start of our
	 * range but does not cover our entire range so we still return 0 but
	 * update the length with the number of sectors before we get to the
	 * good ones.
	 */
	*len = first_bad + bad_sectors - this_sector;
	return 0;
}
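
/*
 * Worked example, assuming a single bad range covering sectors 104..107
 * (first_bad == 104, bad_sectors == 4):
 *  - this_sector == 96,  *len == 8: no overlap with [96, 104), returns 8;
 *  - this_sector == 100, *len == 8: returns 4, the good sectors before 104;
 *  - this_sector == 104, *len == 4: [104, 108) is entirely bad, returns 0;
 *  - this_sector == 106, *len == 8: returns 0 and sets *len to 2, the bad
 *    sectors to skip before the good ones starting at sector 108.
 */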

/*
 * Check if read should choose the first rdev.
 *
 * Balance on the whole device if no resync is going on (recovery is ok) or
 * below the resync window. Otherwise, take the first readable disk.
 */
static inline bool raid1_should_read_first(struct mddev *mddev,
					   sector_t this_sector, int len)
{
	if (mddev->resync_offset < this_sector + len)
		return true;

	if (mddev_is_clustered(mddev) &&
	    mddev->cluster_ops->area_resyncing(mddev, READ, this_sector,
					       this_sector + len))
		return true;

	return false;
}

/*
 * A bio with REQ_RAHEAD or REQ_NOWAIT can fail at any time, even before such
 * IO is submitted to the underlying disks, hence don't record badblocks or
 * retry in this case.
 */
static inline bool raid1_should_handle_error(struct bio *bio)
{
	return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT));
}
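
/*
 * Illustrative sketch (not part of this file): an IO completion path might
 * only record bad blocks or schedule a retry when this helper allows it.
 * Hypothetical use:
 *
 *	if (bio->bi_status && raid1_should_handle_error(bio))
 *		schedule_retry_or_record_badblocks(r1_bio);	// hypothetical
 *	else
 *		finish_request(r1_bio);		// fail/complete as-is
 */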
301