// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX		"raid"
#define MAX_RAID_DEVICES	253	/* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
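/* For illustration: 4 * 4096 bytes = 16 KiB, i.e. 32 sectors of 512 bytes */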

/*
 * Minimum journal space 4 MiB in sectors.
 */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
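/* i.e. 4 MiB at 2048 512-byte sectors per MiB = 8192 sectors */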

static bool devices_handle_discard_safely;

/*
 * The following flags are used by dm-raid to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC			0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC		1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD		2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP		3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE	4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE	5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND	6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY	7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE		8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE		9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES	10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT	11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET		13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */

/* New for v1.10.0 */
#define __CTR_FLAG_JOURNAL_DEV		15 /* 2 */ /* Only with raid4/5/6 (journal device)! */

/* New for v1.11.1 */
#define __CTR_FLAG_JOURNAL_MODE		16 /* 2 */ /* Only with raid4/5/6 (journal mode)! */

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */
/* Define mask covering both sync flags ('sync' and 'nosync') */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET | \
				  CTR_FLAG_JOURNAL_DEV | \
				  CTR_FLAG_JOURNAL_MODE)

/* Valid options definitions per raid level... */

/* "raid0" accepts only a data offset */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that parity and the Q-syndrome are both written correctly on
 * every write
 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)
/* ...valid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5
#define RT_FLAG_RS_IN_SYNC		6
#define RT_FLAG_RS_RESYNCING		7
#define RT_FLAG_RS_GROW			8
#define RT_FLAG_RS_FROZEN		9

/* Number of 64-bit array elements needed to hold the rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
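/* i.e. (253 + 63) / 8 / 8 = 4 words, providing 256 bits for up to 253 devices */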

/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;

	sector_t array_sectors;
	sector_t dev_sectors;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[] __counted_by(raid_disks);
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",	  "raid0 (striping)",			    0, 2, 0,  0 /* NONE */},
	{"raid1",	  "raid1 (mirroring)",			    0, 2, 1,  0 /* NONE */},
	{"raid10_far",	  "raid10 far (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",	  "raid10 near (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",	  "raid10 (striped mirrors)",		    0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",	  "raid4 (dedicated first parity disk)",    1, 2, 5,  ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
	{"raid5_n",	  "raid5 (dedicated last parity disk)",	    1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",	  "raid5 (left symmetric)",		    1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",	  "raid5 (right symmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",	  "raid5 (left asymmetric)",		    1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",	  "raid5 (right asymmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",	  "raid6 (zero restart)",		    2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",	  "raid6 (N restart)",			    2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",	  "raid6 (N continue)",			    2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",	  "raid6 (dedicated parity/Q n/6)",	    2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",	  "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",	  "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",	  "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",	  "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}
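
/*
 * Usage example: dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD) returns
 * "rebuild"; passing a mask with more than one bit set logs an error
 * and returns NULL.
 */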

/* Define correlation of raid456 journal cache modes and dm-raid target line parameters */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
};

/* Return MD raid4/5/6 journal mode for the dm-raid target line @mode string */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's the level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type @rt
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */
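
/*
 * Layout field decoding example: layout 0x102 has near_copies
 * (low byte) = 2 and far_copies (2nd byte) = 1, i.e. the plain
 * "near" format with two data copies.
 */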

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 layout for @algorithm and # of data @copies */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
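
/*
 * Encoding examples (with 2 copies, near sets not forced):
 *   near   -> 0x102   (n = 2, f = 1)
 *   far    -> 0x40201 (RAID10_USE_FAR_SETS | f = 2 | n = 1)
 *   offset -> 0x50201 (RAID10_OFFSET | RAID10_USE_FAR_SETS | f = 2 | n = 1)
 */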
/* END: MD raid10 bit definitions and helpers */

/* Check for any of the raid10 algorithms */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type matching @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors */
static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	/*
	 * raid10 sets rdev->sectors to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

/*
 * Change bdev capacity of @rs in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	if (mddev_init(&rs->md)) {
		kfree(rs);
		ti->error = "Cannot initialize raid context";
		return ERR_PTR(-ENOMEM);
	}

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

/* Free all @rs allocations */
static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	mddev_destroy(&rs->md);
	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
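/*
 * For example, the argument sequence "2 - /dev/sda1 - /dev/sdb1"
 * (raid device count followed by <meta_dev> <data_dev> pairs)
 * describes two data devices without metadata devices, while
 * "2 /dev/sda1 /dev/sda2 /dev/sdb1 /dev/sdb2" pairs each data
 * device with a metadata device (device names are illustrative).
 */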
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Skip the number-of-raid-devices argument to get to the dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
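/*
 * For example, with ti->len = 2^31 sectors (1 TiB) the minimum region
 * size is 2^31 / 2^21 = 1024 sectors; a supplied region_size of 512
 * would be rejected, while 0 selects the 8192-sector (4 MiB) default.
 */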
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies, raid_disks;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->raid_disks; i++)
		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		      !rs->dev[i].rdev.sb_page)))
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		raid_disks = min(rs->raid_disks, rs->md.raid_disks);
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (raid_disks / copies);
		last_group_start = (raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *					(i.e. write hole closing log)
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
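/*
 * Illustrative constructor line (start len "raid" <raid_type>
 * <#raid_params> <raid_params...> <#raid_devs> <meta_dev data_dev>...):
 *
 *   0 41943040 raid raid5_ls 3 64 region_size 8192 \
 *	3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1
 *
 * i.e. a 3-device raid5 with 64-sector chunks, 8192-sector bitmap
 * regions and no metadata devices (device names are examples only).
 */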
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *	- Device is reset when param is read.
	 *   2) A new device is supplied.
	 *	- No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *	- Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *	- Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = bdev_nr_sectors(jdev->bdev);
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}
			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}
			/* Enforce MAX_RAID_DEVICES and the raid type's minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}

/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

/* Return # of data stripes as kept in the mddev of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

/* Check that calculated dev_sectors fits all component devices. */
static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, bdev_nr_sectors(rdev->bdev));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

/* Get reshape sectors from data_offsets or raid set */
static sector_t _get_reshape_sectors(struct raid_set *rs)
{
	struct md_rdev *rdev;
	sector_t reshape_sectors = 0;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags)) {
			reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
					rdev->data_offset - rdev->new_data_offset :
					rdev->new_data_offset - rdev->data_offset;
			break;
		}

	return max(reshape_sectors, (sector_t) rs->data_offset);
}

/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	sector_t array_sectors = sectors, dev_sectors = sectors;
	struct mddev *mddev = &rs->md;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* Special raid1 case w/o delta_disks support (yet) */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * (dev_sectors - _get_reshape_sectors(rs));
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * (dev_sectors - _get_reshape_sectors(rs));

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;
	rs_set_rdev_sectors(rs);

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}
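
/*
 * Worked example: a 3-device raid5 (2 data stripes, no reshape space)
 * called with sectors = 2097152 yields dev_sectors = 2097152 / 2 =
 * 1048576 per component device and array_sectors = 2 * 1048576.
 */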

/* Setup recovery on @rs */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid set types may skip recovery
	 * depending on the 'nosync' flag.
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access the most recent mddev properties */

	/* Only grow size resulting from added stripe(s) after reshape ended. */
	if (!rs_is_reshaping(rs) &&
	    rs->array_sectors > rs->md.array_sectors &&
	    !rs->md.delta_disks &&
	    rs->md.raid_disks == rs->raid_disks) {
		/* The raid10 personality doesn't provide proper device sizes -> correct. */
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);

		rs->md.array_sectors = rs->array_sectors;
		rs_set_capacity(rs);
	}

	dm_table_event(rs->ti->table);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 */
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;
		break;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}
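/*
 * For instance (assuming the mappings above), reloading a two-legged
 * raid1 table with raid_type raid5_n requests a raid1 -> raid5 takeover,
 * which case 1 above accepts, whereas any takeover from a raid10_offset
 * layout is rejected upfront in case 10.
 */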

/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

/* True if layout is set to reshape. */
static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
{
	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
	       rs->md.new_layout != rs->md.layout ||
	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
}

/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (rs_is_raid0(rs))
		return false;

	change = rs_is_layout_change(rs, false);

	/* Historical case to support raid1 reshape without delta disks */
	if (rs_is_raid1(rs)) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (rs_is_raid10(rs))
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}

/* Features */
#define FEATURE_FLAG_SUPPORTS_V190	0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define SB_FLAG_RESHAPE_ACTIVE		0x1
#define SB_FLAG_RESHAPE_BACKWARDS	0x2

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 compat_features;	/* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

	__le32 num_devices;	/* Number of devices in this raid set. (Max 64) */
	__le32 array_position;	/* The position of this drive in the raid set */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Pre 1.9.0 part of bit field of devices to */
				/* indicate failures (see extension below) */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial raid set
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * raid characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/********************************************************************
	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
	 *
	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
	 */

	__le32 flags;		/* Flags defining array states for reshaping */

	/*
	 * This offset tracks the progress of a raid
	 * set reshape in order to be able to restart it
	 */
	__le64 reshape_position;

	/*
	 * These define the properties of the array in case of an interrupted reshape
	 */
	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;	/* Array size in sectors */

	/*
	 * Sector offsets to data on devices (reshaping).
	 * Needed to support out of place reshaping, thus
	 * not writing over any stripes whilst converting
	 * them from old to new layout
	 */
	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;		/* Used device size in sectors */

	/*
	 * Additional bit field of devices indicating failures to support
	 * up to 256 devices with the 1.9.0 on-disk metadata format
	 */
	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;	/* Used to indicate any incompatible features */

	/* Always set rest up to logical block size to 0 when writing (see super_sync() below). */
} __packed;
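/*
 * Illustrative metadata device layout (not authoritative; derived from the
 * offsets used in this file): the superblock occupies the first logical
 * block at sector 0, zero padded up to the logical block size, and the
 * write-intent bitmap - when used - starts at a 4096 byte offset (see the
 * bitmap_info.offset setup in super_validate() below):
 *
 *	|- superblock (1 logical block, zero padded) -|- bitmap @ 4KiB -|
 */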

/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 * - raid level not supporting reshape (not raid1/4/5/6/10)
 *
 * Returns 0 if none apply, or -EPERM if a constraint is violated,
 * with an error message set in rs->ti->error.
 */
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}

static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}
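/*
 * The failed device bookkeeping above is a plain bit field spread over
 * DISKS_ARRAY_ELEMS 64-bit words: failed_devices[0] mirrors the legacy
 * sb->failed_devices word covering raid disks 0..63, and - assuming the
 * v1.9.0 extended metadata - failed_devices[1..] map onto
 * sb->extended_failed_devices[], covering disks 64..255. E.g. a failed
 * raid disk #65 is recorded as bit 1 of failed_devices[1].
 */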

/*
 * Synchronize the superblock members with the raid set properties
 *
 * All superblock data is little endian.
 */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	bool update_failed_devices = false;
	unsigned int i;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	/* No metadata device, no superblock */
	if (!rdev->meta_bdev)
		return;

	BUG_ON(!rdev->sb_page);

	sb = page_address(rdev->sb_page);

	sb_retrieve_failed_devices(sb, failed_devices);

	for (i = 0; i < rs->raid_disks; i++)
		if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
			update_failed_devices = true;
			set_bit(i, (void *) failed_devices);
		}

	if (update_failed_devices)
		sb_update_failed_devices(sb, failed_devices);

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

	/********************************************************************
	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
	 *
	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
	 */
	sb->new_level = cpu_to_le32(mddev->new_level);
	sb->new_layout = cpu_to_le32(mddev->new_layout);
	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

	sb->delta_disks = cpu_to_le32(mddev->delta_disks);

	smp_rmb(); /* Make sure we access the most recent reshape position */
	sb->reshape_position = cpu_to_le64(mddev->reshape_position);
	if (le64_to_cpu(sb->reshape_position) != MaxSector) {
		/* Flag ongoing reshape */
		sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
	} else {
		/* Clear reshape flags */
		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
	}

	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
	sb->sectors = cpu_to_le64(rdev->sectors);
	sb->incompat_features = cpu_to_le32(0);

	/* Zero out the rest of the payload after the size of the superblock */
	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	r = read_disk_sb(rdev, rdev->sb_size, false);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases in which we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		/* Force writing of superblocks to disk */
		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}
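/*
 * For example, if one leg of a raid1 set failed transiently and comes
 * back, its superblock typically carries an older events count than the
 * current freshest device, so super_load() keeps the refdev (returns 0)
 * and the returning leg is recovered rather than trusted.
 */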

static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
{
	int role;
	struct mddev *mddev = &rs->md;
	uint64_t events_sb;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	mddev->reshape_position = MaxSector;

	mddev->raid_disks = le32_to_cpu(sb->num_devices);
	mddev->level = le32_to_cpu(sb->level);
	mddev->layout = le32_to_cpu(sb->layout);
	mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);

	/*
	 * Reshaping is supported, i.e. reshape_position is valid
	 * in superblock and superblock content is authoritative.
	 */
	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		/* Superblock is authoritative wrt given raid set layout! */
		mddev->new_level = le32_to_cpu(sb->new_level);
		mddev->new_layout = le32_to_cpu(sb->new_layout);
		mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
		mddev->delta_disks = le32_to_cpu(sb->delta_disks);
		mddev->array_sectors = le64_to_cpu(sb->array_sectors);

		/* raid was reshaping and got interrupted */
		if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
			if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				DMERR("Reshape requested but raid set is still reshaping");
				return -EINVAL;
			}

			if (mddev->delta_disks < 0 ||
			    (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
			else
				mddev->reshape_backwards = 0;

			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
		}

	} else {
		/*
		 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
		 */
		struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
		struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);

		if (rs_takeover_requested(rs)) {
			if (rt_cur && rt_new)
				DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
				      rt_cur->name, rt_new->name);
			else
				DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
			return -EINVAL;
		} else if (rs_reshape_requested(rs)) {
			DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
			if (mddev->layout != mddev->new_layout) {
				if (rt_cur && rt_new)
					DMERR(" current layout %s vs new layout %s",
					      rt_cur->name, rt_new->name);
				else
					DMERR(" current layout 0x%X vs new layout 0x%X",
					      le32_to_cpu(sb->layout), mddev->new_layout);
			}
			if (mddev->chunk_sectors != mddev->new_chunk_sectors)
				DMERR(" current stripe sectors %u vs new stripe sectors %u",
				      mddev->chunk_sectors, mddev->new_chunk_sectors);
			if (rs->delta_disks)
				DMERR(" current %u disks vs new %u disks",
				      mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
			if (rs_is_raid10(rs)) {
				DMERR(" Old layout: %s w/ %u copies",
				      raid10_md_layout_to_format(mddev->layout),
				      raid10_md_layout_to_copies(mddev->layout));
				DMERR(" New layout: %s w/ %u copies",
				      raid10_md_layout_to_format(mddev->new_layout),
				      raid10_md_layout_to_copies(mddev->new_layout));
			}
			return -EINVAL;
		}

		DMINFO("Discovered old metadata format; upgrading to extended metadata format");
	}

	if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are three reasons we might not have a superblock:
	 * 1) The raid set is brand new - in which case, all of the
	 *    devices must have their In_sync bit set. Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old raid set
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 * 3) This is/are a new device(s) being added to an old
	 *    raid set during takeover to a higher raid level
	 *    to provide capacity for redundancy or during reshape
	 *    to add capacity to grow the raid set.
	 */
	rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(FirstUse, &r->flags))
			new_devs++;

		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild; clearing superblock",
			       r->raid_disk);
			rebuilds++;

			if (test_bit(FirstUse, &r->flags))
				rebuild_and_new++;
		}
	}

	if (new_devs == rs->raid_disks || !rebuilds) {
		/* Replace a broken device */
		if (new_devs == rs->raid_disks) {
			DMINFO("Superblocks created for new raid set");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs != rebuilds &&
			   new_devs != rs->delta_disks) {
			DMERR("New device injected into existing raid set without "
			      "'delta_disks' or 'rebuild' parameter specified");
			return -EINVAL;
		}
	} else if (new_devs && new_devs != rebuilds) {
		DMERR("%u 'rebuild' devices cannot be injected into"
		      " a raid set with %u other first-time devices",
		      rebuilds, new_devs);
		return -EINVAL;
	} else if (rebuilds) {
		if (rebuild_and_new && rebuilds != rebuild_and_new) {
			DMERR("new device%s provided without 'rebuild'",
			      new_devs > 1 ? "s" : "");
			return -EINVAL;
		} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
			DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
			      (unsigned long long) mddev->recovery_cp);
			return -EINVAL;
		} else if (rs_is_reshaping(rs)) {
			DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
			      (unsigned long long) mddev->reshape_position);
			return -EINVAL;
		}
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	sb_retrieve_failed_devices(sb, failed_devices);
	rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags) ||
		    !r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;
		memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role < 0)
				continue;

			if (role != r->raid_disk) {
				if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
					    rs->raid_disks % rs->raid10_copies) {
						rs->ti->error =
							"Cannot change raid10 near set to odd # of devices!";
						return -EINVAL;
					}

					sb2->array_position = cpu_to_le32(r->raid_disk);

				} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
					   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
					   !rt_is_raid1(rs->raid_type)) {
					rs->ti->error = "Cannot change device positions in raid set";
					return -EINVAL;
				}

				DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (test_bit(role, (void *) failed_devices))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
	struct mddev *mddev = &rs->md;
	struct dm_raid_superblock *sb;

	if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
		return 0;

	sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(rs, rdev))
		return -EINVAL;

	if (le32_to_cpu(sb->compat_features) &&
	    le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
		rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
		return -EINVAL;
	}

	if (sb->incompat_features) {
		rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
		return -EINVAL;
	}

	/* Enable bitmap creation on @rs unless no metadevs or raid0 or journaled raid4/5/6 set. */
	mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
		/*
		 * Retrieve rdev size stored in superblock to be prepared for shrink.
		 * Check extended superblock members are present otherwise the size
		 * will not be set!
		 */
		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
			rdev->sectors = le64_to_cpu(sb->sectors);

		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset == MaxSector)
			set_bit(In_sync, &rdev->flags);
		/*
		 * If no reshape in progress -> we're recovering single
		 * disk(s) and have to set the device(s) to out-of-sync
		 */
		else if (!rs_is_reshaping(rs))
			clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_and_clear_bit(Faulty, &rdev->flags)) {
		rdev->recovery_offset = 0;
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
	}

	/* Reshape support -> restore respective data offsets */
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int r;
	struct md_rdev *rdev, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each(rdev, mddev) {
		if (test_bit(Journal, &rdev->flags))
			continue;

		if (!rdev->meta_bdev)
			continue;

		/* Set superblock offset/size for metadata device. */
		rdev->sb_start = 0;
		rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
		if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
			DMERR("metadata device logical block size is invalid for the superblock");
			return -EINVAL;
		}

		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new. This is the intended effect
		 * of the "sync" directive.
		 *
		 * With reshaping capability added, we must ensure that
		 * the "sync" directive is disallowed during the reshape.
		 */
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			continue;

		r = super_load(rdev, freshest);

		switch (r) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			/* This is a failure to read the superblock from the metadata device. */
			/*
			 * We have to keep any raid0 data/metadata device pairs or
			 * the MD raid0 personality will fail to start the array.
			 */
			if (rs_is_raid0(rs))
				continue;

			/*
			 * We keep the dm_devs to be able to emit the device tuple
			 * properly on the table line in raid_status() (rather than
			 * mistakenly acting as if '- -' got passed into the constructor).
			 *
			 * The rdev has to stay on the same_set list to allow for
			 * the attempt to restore faulty devices on second resume.
			 */
			rdev->raid_disk = rdev->saved_raid_disk = -1;
			break;
		}
	}

	if (!freshest)
		return 0;

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	rs->ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(rs, freshest))
		return -EINVAL;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags) &&
		    rdev != freshest &&
		    super_validate(rs, rdev))
			return -EINVAL;
	return 0;
}

/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out of place reshaping if requested by constructor
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes which userspace has to provide
 * via remapping/reordering of space.
 */
static int rs_adjust_data_offsets(struct raid_set *rs)
{
	sector_t data_offset = 0, new_data_offset = 0;
	struct md_rdev *rdev;

	/* Constructor did not request data offset change */
	if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
		if (!rs_is_reshapable(rs))
			goto out;

		return 0;
	}

	/* HM FIXME: get In_Sync raid_dev? */
	rdev = &rs->dev[0].rdev;

	if (rs->delta_disks < 0) {
		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *   is at end of each component LV
		 *
		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
		 */
		data_offset = 0;
		new_data_offset = rs->data_offset;

	} else if (rs->delta_disks > 0) {
		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0 and
		 *   free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 on each component LV
		 */
		data_offset = rs->data_offset;
		new_data_offset = 0;

	} else {
		/*
		 * User space passes in 0 for data offset after having removed reshape space
		 *
		 * - or - (data offset != 0)
		 *
		 * Changing RAID layout or chunk size -> toggle offsets
		 *
		 * - before reshape: data is at offset rs->data_offset == 0 and
		 *   free space is at end of each component LV
		 *   -or-
		 *   data is at offset rs->data_offset != 0 and
		 *   free space is at begin of each component LV
		 *
		 * - after reshape: data is at offset 0 if it was at offset != 0
		 *   or at offset != 0 if it was at offset 0
		 *   on each component LV
		 */
		data_offset = rs->data_offset ? rdev->data_offset : 0;
		new_data_offset = data_offset ? 0 : rs->data_offset;
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	}

	/*
	 * Make sure we got a minimum amount of free sectors per device
	 */
	if (rs->data_offset &&
	    bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
		rs->ti->error = data_offset ? "No space for forward reshape" :
					      "No space for backward reshape";
		return -ENOSPC;
	}
out:
	/*
	 * Raise recovery_cp in case data_offset != 0 to
	 * avoid false recovery positives in the constructor.
	 */
	if (rs->md.recovery_cp < rs->md.dev_sectors)
		rs->md.recovery_cp += rs->dev[0].rdev.data_offset;

	/* Adjust data offsets on all rdevs but not on any raid4/5/6 journal device */
	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->data_offset = data_offset;
			rdev->new_data_offset = new_data_offset;
		}
	}

	return 0;
}
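/*
 * Example of the offset toggling above (hypothetical numbers): for a
 * chunk size change on a set mapped with "data_offset 8192", a first
 * table load may run with data_offset == 0 / new_data_offset == 8192
 * (backward reshape into free space at the LV end); a subsequent layout
 * change then toggles to data_offset == 8192 / new_data_offset == 0,
 * reusing the same out-of-place space in the other direction.
 */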

/* Userspace reordered disks -> adjust raid_disk indexes in @rs */
static void __reorder_raid_disk_indexes(struct raid_set *rs)
{
	int i = 0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md) {
		if (!test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = i++;
			rdev->saved_raid_disk = rdev->new_raid_disk = -1;
		}
	}
}

/*
 * Setup @rs for takeover by a different raid level
 */
static int rs_setup_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;
	unsigned int d = mddev->raid_disks = rs->raid_disks;
	sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;

	if (rt_is_raid10(rs->raid_type)) {
		if (rs_is_raid0(rs)) {
			/* Userspace reordered disks -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);

			/* raid0 -> raid10_far layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
								   rs->raid10_copies);
		} else if (rs_is_raid1(rs))
			/* raid1 -> raid10_near layout */
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid_disks);
		else
			return -EINVAL;

	}

	clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
	mddev->recovery_cp = MaxSector;

	while (d--) {
		rdev = &rs->dev[d].rdev;

		if (test_bit(d, (void *) rs->rebuild_disks)) {
			clear_bit(In_sync, &rdev->flags);
			clear_bit(Faulty, &rdev->flags);
			mddev->recovery_cp = rdev->recovery_offset = 0;
			/* Bitmap has to be created when we do an "up" takeover */
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		}

		rdev->new_data_offset = new_data_offset;
	}

	return 0;
}

/* Prepare @rs for reshape */
static int rs_prepare_reshape(struct raid_set *rs)
{
	bool reshape;
	struct mddev *mddev = &rs->md;

	if (rs_is_raid10(rs)) {
		if (rs->raid_disks != mddev->raid_disks &&
		    __is_raid10_near(mddev->layout) &&
		    rs->raid10_copies &&
		    rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
			/*
			 * raid disks have to be a multiple of data copies to allow this conversion.
			 *
			 * This is actually not a reshape; it is a
			 * rebuild of any additional mirrors per group.
			 */
			if (rs->raid_disks % rs->raid10_copies) {
				rs->ti->error = "Can't reshape raid10 mirror groups";
				return -EINVAL;
			}

			/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
			__reorder_raid_disk_indexes(rs);
			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
								   rs->raid10_copies);
			mddev->new_layout = mddev->layout;
			reshape = false;
		} else
			reshape = true;

	} else if (rs_is_raid456(rs))
		reshape = true;

	else if (rs_is_raid1(rs)) {
		if (rs->delta_disks) {
			/* Process raid1 via delta_disks */
			mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
			reshape = true;
		} else {
			/* Process raid1 without delta_disks */
			mddev->raid_disks = rs->raid_disks;
			reshape = false;
		}
	} else {
		rs->ti->error = "Called with bogus raid type";
		return -EINVAL;
	}

	if (reshape) {
		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
	} else if (mddev->raid_disks < rs->raid_disks)
		/* Create new superblocks and bitmaps, if any new disks */
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);

	return 0;
}

/*
 * Reshape:
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */
static int rs_setup_reshape(struct raid_set *rs)
{
	int r = 0;
	unsigned int cur_raid_devs, d;
	sector_t reshape_sectors = _get_reshape_sectors(rs);
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	mddev->delta_disks = rs->delta_disks;
	cur_raid_devs = mddev->raid_disks;

	/* Ignore impossible layout change whilst adding/removing disks */
	if (mddev->delta_disks &&
	    mddev->layout != mddev->new_layout) {
		DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
		mddev->new_layout = mddev->layout;
	}

	/*
	 * Adjust array size:
	 *
	 * - in case of adding disk(s), array size has
	 *   to grow after the disk adding reshape,
	 *   which'll happen in the event handler;
	 *   reshape will happen forward, so space has to
	 *   be available at the beginning of each disk
	 *
	 * - in case of removing disk(s), array size
	 *   has to shrink before starting the reshape,
	 *   which'll happen here;
	 *   reshape will happen backward, so space has to
	 *   be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are
	 *   adjusted for aforementioned out of place
	 *   reshaping based on userspace passing in
	 *   the "data_offset <sectors>" key/value
	 *   pair via the constructor
	 */

	/* Add disk(s) */
	if (rs->delta_disks > 0) {
		/* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
		for (d = cur_raid_devs; d < rs->raid_disks; d++) {
			rdev = &rs->dev[d].rdev;
			clear_bit(In_sync, &rdev->flags);

			/*
			 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
			 * by md, which'll store that erroneously in the superblock on reshape
			 */
			rdev->saved_raid_disk = -1;
			rdev->raid_disk = d;

			rdev->sectors = mddev->dev_sectors;
			rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
		}

		mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */

	/* Remove disk(s) */
	} else if (rs->delta_disks < 0) {
		r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
		mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */

	/* Change layout and/or chunk size */
	} else {
		/*
		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
		 *
		 * keeping number of disks and do layout change ->
		 *
		 * toggle reshape_backwards depending on data_offset:
		 *
		 * - free space upfront -> reshape forward
		 *
		 * - free space at the end -> reshape backward
		 *
		 * This utilizes free reshape space avoiding the need
		 * for userspace to move (parts of) LV segments in
		 * case of layout/chunksize change (for disk
		 * adding/removing reshape space has to be at
		 * the proper address (see above with delta_disks):
		 *
		 * add disk(s)    -> begin
		 * remove disk(s) -> end
		 */
		mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
	}

	/*
	 * Adjust device size for forward reshape
	 * because md_finish_reshape() reduces it.
	 */
	if (!mddev->reshape_backwards)
		rdev_for_each(rdev, &rs->md)
			if (!test_bit(Journal, &rdev->flags))
				rdev->sectors += reshape_sectors;

	return r;
}

/*
 * If the md resync thread has updated superblock with max reshape position
 * at the end of a reshape but not (yet) reset the layout configuration
 * changes -> reset the latter.
 */
static void rs_reset_inconclusive_reshape(struct raid_set *rs)
{
	if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
		rs_set_cur(rs);
		rs->md.delta_disks = 0;
		rs->md.reshape_backwards = 0;
	}
}

/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct raid_set *rs)
{
	int i;
	bool raid456;
	struct dm_target *ti = rs->ti;

	/*
	 * XXX: RAID level 4,5,6 require zeroing for safety.
	 */
	raid456 = rs_is_raid456(rs);

	for (i = 0; i < rs->raid_disks; i++) {
		if (!rs->dev[i].rdev.bdev ||
		    !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
			return;

		if (raid456) {
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	ti->num_discard_bios = 1;
}
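/*
 * Assuming devices_handle_discard_safely is wired up as the usual module
 * parameter (it is declared near the top of this file; the registration
 * lives elsewhere in it), raid4/5/6 discards can be enabled e.g. via
 *
 *	modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or via the dm-raid.devices_handle_discard_safely=Y kernel command line
 * option mentioned in the error message above.
 */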

/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,} \
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace may initialize the metadata devices, hence the superblocks,
 * to enforce recreation based on the passed in table parameters.
 */
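/*
 * Illustrative table line (device numbers hypothetical; see
 * Documentation/admin-guide/device-mapper/dm-raid.rst for the
 * authoritative examples):
 *
 *	0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 *
 * maps a raid4 set of five members with a 2048 sector chunk size and no
 * metadata devices (the '-' placeholders).
 */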
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	bool resize = false;
	struct raid_type *rt;
	unsigned int num_raid_params, num_raid_devs;
	sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
	struct raid_set *rs = NULL;
	const char *arg;
	struct rs_layout rs_layout;
	struct dm_arg_set as = { argc, argv }, as_nrd;
	struct dm_arg _args[] = {
		{ 0, as.argc, "Cannot understand number of raid parameters" },
		{ 1, 254, "Cannot understand number of raid devices parameters" }
	};

	arg = dm_shift_arg(&as);
	if (!arg) {
		ti->error = "No arguments";
		return -EINVAL;
	}

	rt = get_raid_type(arg);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}

	/* Must have <#raid_params> */
	if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
		return -EINVAL;

	/* number of raid device tuples <meta_dev data_dev> */
	as_nrd = as;
	dm_consume_args(&as_nrd, num_raid_params);
	_args[1].max = (as_nrd.argc - 1) / 2;
	if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
		return -EINVAL;

	if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
		ti->error = "Invalid number of supplied raid devices";
		return -EINVAL;
	}

	rs = raid_set_alloc(ti, rt, num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	r = parse_raid_params(rs, &as, num_raid_params);
	if (r)
		goto bad;

	r = parse_dev_params(rs, &as);
	if (r)
		goto bad;

	rs->md.sync_super = super_sync;

	/*
	 * Calculate ctr requested array and device sizes to allow
	 * for superblock analysis needing device sizes defined.
	 *
	 * Any existing superblock will overwrite the array and device sizes
	 */
	r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
	if (r)
		goto bad;

	/* Memorize just calculated, potentially larger sizes to grow the raid set in preresume */
	rs->array_sectors = rs->md.array_sectors;
	rs->dev_sectors = rs->md.dev_sectors;

	/*
	 * Backup any new raid set level, layout, ...
	 * requested to be able to compare to superblock
	 * members for conversion decisions.
	 */
	rs_config_backup(rs, &rs_layout);

	r = analyse_superblocks(ti, rs);
	if (r)
		goto bad;

	/* All in-core metadata now as of current superblocks after calling analyse_superblocks() */
	sb_array_sectors = rs->md.array_sectors;
	rdev_sectors = __rdev_sectors(rs);
	if (!rdev_sectors) {
		ti->error = "Invalid rdev size";
		r = -EINVAL;
		goto bad;
	}

	reshape_sectors = _get_reshape_sectors(rs);
	if (rs->dev_sectors != rdev_sectors) {
		resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
		if (rs->dev_sectors > rdev_sectors - reshape_sectors)
			set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
	}

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;
	ti->needs_bio_set_dev = true;

	/* Restore any requested new layout for conversion decision */
	rs_config_restore(rs, &rs_layout);

	/*
	 * Now that we have any superblock metadata available,
	 * check for new, recovering, reshaping, to be taken over,
	 * to be reshaped or an existing, unchanged raid set to
	 * run in sequence.
	 */
	if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
		/* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
		if (rs_is_raid6(rs) &&
		    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
			ti->error = "'nosync' not allowed for new raid6 set";
			r = -EINVAL;
			goto bad;
		}
		rs_setup_recovery(rs, 0);
		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		rs_set_new(rs);
	} else if (rs_is_recovering(rs)) {
		/* A recovering raid set may be resized */
		goto size_check;
	} else if (rs_is_reshaping(rs)) {
		/* Have to reject size change request during reshape */
		if (resize) {
			ti->error = "Can't resize a reshaping raid set";
			r = -EPERM;
			goto bad;
		}
		/* skip setup rs */
	} else if (rs_takeover_requested(rs)) {
		if (rs_is_reshaping(rs)) {
			ti->error = "Can't takeover a reshaping raid set";
			r = -EPERM;
			goto bad;
		}

		/* We can't takeover a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't takeover a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/*
		 * If a takeover is needed, userspace sets any additional
		 * devices to rebuild and we can check for a valid request here.
		 *
		 * If acceptable, set the level to the new requested
		 * one, prohibit requesting recovery, allow the raid
		 * set to run and store superblocks during resume.
		 */
		r = rs_check_takeover(rs);
		if (r)
			goto bad;

		r = rs_setup_takeover(rs);
		if (r)
			goto bad;

		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		/* Takeover ain't recovery, so disable recovery */
		rs_setup_recovery(rs, MaxSector);
		rs_set_new(rs);
	} else if (rs_reshape_requested(rs)) {
		/* Only request grow on raid set size extensions, not on reshapes. */
		clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);

		/*
		 * No need to check for 'ongoing' takeover here, because takeover
		 * is an instant operation as opposed to an ongoing reshape.
		 */

		/* We can't reshape a journaled raid4/5/6 */
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
			ti->error = "Can't reshape a journaled raid4/5/6 set";
			r = -EPERM;
			goto bad;
		}

		/* Out-of-place space has to be available to allow for a reshape unless raid1! */
		if (reshape_sectors || rs_is_raid1(rs)) {
			/*
			 * We can only prepare for a reshape here, because the
			 * raid set needs to run to provide the respective reshape
			 * check functions via its MD personality instance.
			 *
			 * So do the reshape check after md_run() succeeded.
			 */
			r = rs_prepare_reshape(rs);
			if (r)
				goto bad;

			/* Reshaping ain't recovery, so disable recovery */
			rs_setup_recovery(rs, MaxSector);
		}
		rs_set_cur(rs);
	} else {
size_check:
		/* May not set recovery when a device rebuild is requested */
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
			clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
			set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
			rs_setup_recovery(rs, MaxSector);
		} else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
			/*
			 * Set raid set to current size, i.e. size as of
			 * superblocks to grow to larger size in preresume.
			 */
			r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
			if (r)
				goto bad;

			rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
		} else {
			/* This is no size change or it is shrinking, update size and record in superblocks */
			r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
			if (r)
				goto bad;

			if (sb_array_sectors > rs->array_sectors)
				set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
		}
		rs_set_cur(rs);
	}

	/* If constructor requested it, change data and new_data offsets */
	r = rs_adjust_data_offsets(rs);
	if (r)
		goto bad;

	/* Catch any inconclusive reshape superblock content. */
	rs_reset_inconclusive_reshape(rs);

	/* Start raid set read-only and assumed clean to change in raid_resume() */
	rs->md.ro = 1;
	rs->md.in_sync = 1;

	/* Has to be held on running the array */
	mddev_suspend_and_lock_nointr(&rs->md);

	/* Keep array frozen until resume. */
	md_frozen_sync_thread(&rs->md);

	r = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	if (r) {
		ti->error = "Failed to run raid array";
		mddev_unlock(&rs->md);
		goto bad;
	}

	r = md_start(&rs->md);
	if (r) {
		ti->error = "Failed to start raid array";
		goto bad_unlock;
	}

	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
	if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
		if (r) {
			ti->error = "Failed to set raid4/5/6 journal mode";
			goto bad_unlock;
		}
	}

	set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);

	/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
	if (rs_is_raid456(rs)) {
		r = rs_set_raid456_stripe_cache(rs);
		if (r)
			goto bad_unlock;
	}

	/* Now do an early reshape check */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		r = rs_check_reshape(rs);
		if (r)
			goto bad_unlock;

		/* Restore new, ctr requested layout to perform check */
		rs_config_restore(rs, &rs_layout);

		if (rs->md.pers->start_reshape) {
			r = rs->md.pers->check_reshape(&rs->md);
			if (r) {
				ti->error = "Reshape check failed";
				goto bad_unlock;
			}
		}
	}

	/* Disable/enable discard support on raid set. */
	configure_discard_support(rs);

	mddev_unlock(&rs->md);
	return 0;

bad_unlock:
	md_stop(&rs->md);
	mddev_unlock(&rs->md);
bad:
	raid_set_free(rs);

	return r;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_lock_nointr(&rs->md);
	md_stop(&rs->md);
	mddev_unlock(&rs->md);

	if (work_pending(&rs->md.event_work))
		flush_work(&rs->md.event_work);
	raid_set_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses past EOD of the component
	 * data images thus erroring the raid set.
	 */
	if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
		return DM_MAPIO_REQUEUE;

	if (unlikely(!md_handle_request(mddev, bio)))
		return DM_MAPIO_REQUEUE;

	return DM_MAPIO_SUBMITTED;
}

/* Return sync state string for @state */
enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
static const char *sync_str(enum sync_state state)
{
	/* Has to be in above sync_state order! */
	static const char *sync_strs[] = {
		"frozen",
		"reshape",
		"resync",
		"check",
		"repair",
		"recover",
		"idle"
	};

	return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
}

/* Return enum sync_state for @mddev derived from @recovery flags */
static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		return st_frozen;

	/* The MD sync thread can be done with io or be interrupted but still be running */
	if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
	    (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	     (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			return st_reshape;

		if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				return st_resync;
			if (test_bit(MD_RECOVERY_CHECK, &recovery))
				return st_check;
			return st_repair;
		}

		if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			return st_recover;

		if (mddev->reshape_position != MaxSector)
			return st_reshape;
	}

	return st_idle;
}

/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
 *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
 *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
 *  '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
 */
static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
	if (!rdev->bdev)
		return "-";
	else if (test_bit(Faulty, &rdev->flags))
		return "D";
	else if (test_bit(Journal, &rdev->flags))
		return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
	else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
		 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
		  !test_bit(In_sync, &rdev->flags)))
		return "a";
	else
		return "A";
}
3433
3434 /* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */
rs_get_progress(struct raid_set * rs,unsigned long recovery,enum sync_state state,sector_t resync_max_sectors)3435 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3436 enum sync_state state, sector_t resync_max_sectors)
3437 {
3438 sector_t r;
3439 struct mddev *mddev = &rs->md;
3440
	clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
	clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

	if (rs_is_raid0(rs)) {
		r = resync_max_sectors;
		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

	} else {
		if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
			r = mddev->recovery_cp;
		else
			r = mddev->curr_resync_completed;

		if (state == st_idle && r >= resync_max_sectors) {
			/*
			 * Sync complete.
			 */
			/* In case we have finished recovering, the array is in sync. */
			if (test_bit(MD_RECOVERY_RECOVER, &recovery))
				set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		} else if (state == st_recover)
			/*
			 * In case we are recovering, the array is not in sync
			 * and health chars should show the recovering legs.
			 *
			 * Already retrieved recovery offset from curr_resync_completed above.
			 */
			;

		else if (state == st_resync || state == st_reshape)
			/*
			 * If "resync/reshape" is occurring, the raid set
			 * is or may be out of sync hence the health
			 * characters shall be 'a'.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

		else if (state == st_check || state == st_repair)
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);

		else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
			/*
			 * We are idle and recovery is needed, prevent 'A' chars race
			 * caused by components still set to in-sync by constructor.
			 */
			set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);

		else {
			/*
			 * We are idle and the raid set may be doing an initial
			 * sync, or it may be rebuilding individual components.
			 * If all the devices are In_sync, then it is the raid set
			 * that is being initialized.
			 */
			struct md_rdev *rdev;

			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
			rdev_for_each(rdev, mddev)
				if (!test_bit(Journal, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags)) {
					clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
					break;
				}
		}
	}

	return min(r, resync_max_sectors);
}

/* Helper to return @dev name or "-" if !@dev */
static const char *__get_dev_name(struct dm_dev *dev)
{
	return dev ? dev->name : "-";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
	int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
	unsigned long recovery;
	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned int sz = 0;
	unsigned int rebuild_writemostly_count = 0;
	sector_t progress, resync_max_sectors, resync_mismatches;
	enum sync_state state;
	struct raid_type *rt;

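	/*
	 * STATUSTYPE_INFO reports the runtime state of the set,
	 * STATUSTYPE_TABLE reconstructs the constructor line and
	 * STATUSTYPE_IMA emits key=value pairs for IMA measurement;
	 * the first two formats are described in
	 * Documentation/admin-guide/device-mapper/dm-raid.rst.
	 */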
	switch (type) {
	case STATUSTYPE_INFO:
		/* *Should* always succeed */
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT("%s %d ", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		/* Get sensible max sectors even if raid set not yet started */
		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
				     mddev->resync_max_sectors : mddev->dev_sectors;
		recovery = rs->md.recovery;
		state = decipher_sync_action(mddev, recovery);
		progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
		resync_mismatches = mddev->last_sync_action == ACTION_CHECK ?
				    atomic64_read(&mddev->resync_mismatches) : 0;

		/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));

		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set
		 */
		DMEMIT(" %llu/%llu", (unsigned long long) progress,
		       (unsigned long long) resync_max_sectors);

		/*
		 * v1.5.0+:
		 *
		 * Sync action:
		 *   See Documentation/admin-guide/device-mapper/dm-raid.rst for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", sync_str(state));

		/*
		 * v1.5.0+:
		 *
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
		DMEMIT(" %llu", (unsigned long long) resync_mismatches);

		/*
		 * v1.9.0+:
		 *
		 * data_offset (needed for out of space reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripe's data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);

		/*
		 * v1.10.0+:
		 */
		DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
			      __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
		break;

	case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/*
		 * Count the total rebuild and writemostly arguments, then
		 * subtract the pairs that the hweight32() terms below add
		 * back for the rebuild and writemostly ctr flags.
		 */
		for (i = 0; i < rs->raid_disks; i++) {
			rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
						     (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
		}
		rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
					     (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
		/* Calculate raid parameter count from ^ rebuild/writemostly argument counts and ctr flags set. */
		raid_param_cnt += rebuild_writemostly_count +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
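		/*
		 * Example: two "rebuild" disks and one "write_mostly" disk
		 * emit 3 * 2 = 6 arguments below; the per-disk loop counted
		 * 6, the one-arg hweight32() term re-adds 2 * 2 = 4, so 4
		 * was subtracted above to avoid double counting.
		 */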
		/* Emit table line */
		/* This has to be in the documented order for userspace! */
		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
		if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(i, (void *) rs->rebuild_disks))
					DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
			       mddev->bitmap_info.daemon_sleep);
		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
			       mddev->sync_speed_min);
		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
			       mddev->sync_speed_max);
		if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
			for (i = 0; i < rs->raid_disks; i++)
				if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
					DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
					       rs->dev[i].rdev.raid_disk);
		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
			       mddev->bitmap_info.max_write_behind);
		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
			       max_nr_stripes);
		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
			       (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
			       raid10_md_layout_to_copies(mddev->layout));
		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
			       raid10_md_layout_to_format(mddev->layout));
		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
			       max(rs->delta_disks, mddev->delta_disks));
		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
			       (unsigned long long) rs->data_offset);
		if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
			       __get_dev_name(rs->journal_dev.dev));
		if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
			       md_journal_mode_to_dm_raid(rs->journal_dev.mode));
		DMEMIT(" %d", rs->raid_disks);
		for (i = 0; i < rs->raid_disks; i++)
			DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
			       __get_dev_name(rs->dev[i].data_dev));
		break;

	case STATUSTYPE_IMA:
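		/* Emit comma-separated key=value pairs for IMA measurement of the target state. */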
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",raid_type=%s,raid_disks=%d", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		recovery = rs->md.recovery;
		state = decipher_sync_action(mddev, recovery);
		DMEMIT(",raid_state=%s", sync_str(state));

		for (i = 0; i < rs->raid_disks; i++) {
			DMEMIT(",raid_device_%d_status=", i);
			DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
		}

		if (rt_is_raid456(rt)) {
			DMEMIT(",journal_dev_mode=");
			switch (rs->journal_dev.mode) {
			case R5C_JOURNAL_MODE_WRITE_THROUGH:
				DMEMIT("%s",
				       _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_THROUGH].param);
				break;
			case R5C_JOURNAL_MODE_WRITE_BACK:
				DMEMIT("%s",
				       _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_BACK].param);
				break;
			default:
				DMEMIT("invalid");
				break;
			}
		}
		DMEMIT(";");
		break;
	}
}

static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	int ret = 0;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
	    test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
		return -EBUSY;
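
	/*
	 * "frozen" freezes and "idle" stops the running MD sync thread
	 * under the reconfig mutex; "resync", "recover", "check" and
	 * "repair" then select the next sync action once the set is idle.
	 */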
	if (!strcasecmp(argv[0], "frozen")) {
		ret = mddev_lock(mddev);
		if (ret)
			return ret;

		md_frozen_sync_thread(mddev);
		mddev_unlock(mddev);
	} else if (!strcasecmp(argv[0], "idle")) {
		ret = mddev_lock(mddev);
		if (ret)
			return ret;

		md_idle_sync_thread(mddev);
		mddev_unlock(mddev);
	}

	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check")) {
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!strcasecmp(argv[0], "repair")) {
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else
			return -EINVAL;
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned int i;
	int r = 0;

	for (i = 0; !r && i < rs->raid_disks; i++) {
		if (rs->dev[i].data_dev) {
			r = fn(ti, rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors, data);
		}
	}

	return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);

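	/* Advertise the chunk size as minimum and the full data-stripe width as optimal I/O size. */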
	limits->io_min = chunk_size_bytes;
	limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
}

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/*
	 * From now on, disallow raid_message() from changing sync_thread
	 * until resume; raid_postsuspend() is too late.
	 */
	set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);

	if (!reshape_interrupted(mddev))
		return;

	/*
	 * For raid456, if reshape is interrupted, IO across the reshape
	 * position will never make progress, while the caller will wait
	 * for IO to be done. Inform raid456 to handle those IO to prevent
	 * deadlock.
	 */
	if (mddev->pers && mddev->pers->prepare_suspend)
		mddev->pers->prepare_suspend(mddev);
}

static void raid_presuspend_undo(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/*
		 * sync_thread must be stopped during suspend, and writes have
		 * to be stopped before suspending to avoid deadlocks.
		 */
		md_stop_writes(&rs->md);
		mddev_suspend(&rs->md, false);
	}
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
	unsigned long flags;
	bool cleared = false;
	struct dm_raid_superblock *sb;
	struct mddev *mddev = &rs->md;
	struct md_rdev *r;

	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
		return;

	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));

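	/* Record every slot revived below so all superblocks can be corrected afterwards. */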
	for (i = 0; i < rs->raid_disks; i++) {
		r = &rs->dev[i].rdev;
		/* HM FIXME: enhance journal device recovery processing */
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(Faulty, &r->flags) &&
		    r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk'). If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			flags = r->flags;
			clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
			if (r->raid_disk >= 0) {
				if (mddev->pers->hot_remove_disk(mddev, r)) {
					/* Failed to revive this device, try next */
					r->flags = flags;
					continue;
				}
			} else
				r->raid_disk = r->saved_raid_disk = i;

			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);

			if (mddev->pers->hot_add_disk(mddev, r)) {
				/* Failed to revive this device, try next */
				r->raid_disk = r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				clear_bit(In_sync, &r->flags);
				r->recovery_offset = 0;
				set_bit(i, (void *) cleared_failed_devices);
				cleared = true;
			}
		}
	}

	/* If any failed devices could be cleared, update all sbs failed_devices bits */
	if (cleared) {
		uint64_t failed_devices[DISKS_ARRAY_ELEMS];

		rdev_for_each(r, &rs->md) {
			if (test_bit(Journal, &r->flags))
				continue;

			sb = page_address(r->sb_page);
			sb_retrieve_failed_devices(sb, failed_devices);

			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
				failed_devices[i] &= ~cleared_failed_devices[i];

			sb_update_failed_devices(sb, failed_devices);
		}
	}
}

static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		struct mddev *mddev = &rs->md;

		r = mddev->bitmap_ops->load(mddev);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

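	/*
	 * md_update_sb() skips writing on read-only arrays, so clear
	 * mddev->ro around the call and restore it afterwards.
	 */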
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}

/*
 * Reshape changes the raid algorithm of @rs to a new one within the same
 * personality (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
 * disks from a raid set thus growing/shrinking it or resizes the set
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	/* Don't allow the sync thread to work until the table gets reloaded. */
	set_bit(MD_RECOVERY_WAIT, &mddev->recovery);

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/*
	 * Check any reshape constraints enforced by the personality
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide start reshape method in which
	 * case check_reshape above has already covered everything
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/*
	 * Now reshape got set up, update superblocks to
	 * reflect the fact so that a table reload will
	 * access proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}

static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started. */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/* If different and no explicit grow request, expose MD array size as of superblock. */
	if (!test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) &&
	    rs->array_sectors != mddev->array_sectors)
		rs_set_capacity(rs);

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
	if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
		mddev->array_sectors = rs->array_sectors;
		mddev->dev_sectors = rs->dev_sectors;
		rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
	    (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
	     (rs->requested_bitmap_chunk_sectors &&
	      mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
		int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;

		r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
					      chunksize, false);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* If the superblock recorded an unfinished resync, request that it continue from the checkpoint. */
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
		if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
			mddev->resync_max_sectors = mddev->dev_sectors;
	}

	/* Check for any reshape request unless new raid set */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		rs_set_rdev_sectors(rs);
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		mddev_lock_nointr(mddev);
		attempt_restore_of_faulty_devices(rs);
		mddev_unlock(mddev);
	}

	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Only reduce raid set size before running a disk removing reshape. */
		if (mddev->delta_disks < 0)
			rs_set_capacity(rs);

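		/*
		 * raid_postsuspend() froze and stopped the sync thread;
		 * the WARN_ON_ONCE()s below assert that state before the
		 * set is unfrozen and resumed.
		 */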
		mddev_lock_nointr(mddev);
		WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
		WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread,
						       lockdep_is_held(&mddev->reconfig_mutex)));
		clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
		mddev->ro = 0;
		mddev->in_sync = 0;
		md_unfrozen_sync_thread(mddev);
		mddev_unlock_and_resume(mddev);
	}
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 15, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.presuspend_undo = raid_presuspend_undo,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};
module_dm(raid);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@lists.linux.dev>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");