#ifndef _RAID10_H
#define _RAID10_H

struct mirror_info {
	struct md_rdev	*rdev;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct mirror_info	*mirrors;
	int			raid_disks;
	spinlock_t		device_lock;

	/* geometry */
	int			near_copies;  /* number of copies laid out raid0 style */
	int			far_copies;   /* number of copies laid out
					       * at large strides across drives
					       */
	int			far_offset;   /* far_copies are offset by 1 stripe
					       * instead of many
					       */
	int			copies;	      /* near_copies * far_copies.
					       * must be <= raid_disks
					       */
	sector_t		stride;	      /* distance between far copies.
					       * This is size / far_copies unless
					       * far_offset, in which case it is
					       * 1 stripe.
					       */
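	/*
	 * Illustrative sketch only (the authoritative mapping lives in
	 * raid10.c): assuming 4 drives and data chunks A, B, C, ...
	 *
	 * near_copies=2, far_copies=1 (mdadm layout "n2") repeats each
	 * chunk on adjacent drives within the same stripe:
	 *
	 *	dev0  dev1  dev2  dev3
	 *	 A     A     B     B
	 *	 C     C     D     D
	 *
	 * near_copies=1, far_copies=2 ("f2") places the second copy a
	 * whole stride further into each drive, rotated by one device:
	 *
	 *	 A     B     C     D
	 *	 ...   (stride sectors later)
	 *	 D     A     B     C
	 *
	 * With far_offset set ("o2") the rotated copy follows in the very
	 * next stripe rather than a full stride away.
	 */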

	sector_t		dev_sectors;  /* temp copy of mddev->dev_sectors */

	int			chunk_shift;  /* shift from chunks to sectors */
	sector_t		chunk_mask;
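	/*
	 * Purely illustrative arithmetic: chunk_shift/chunk_mask let the
	 * mapping code split a virtual sector without a division, roughly
	 *
	 *	chunk  = sector >> chunk_shift;	 (which chunk)
	 *	offset = sector & chunk_mask;	 (sector within that chunk)
	 */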

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */

	wait_queue_head_t	wait_barrier;
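	/*
	 * Descriptive note, not new mechanism: as in raid1, raise_barrier()
	 * bumps 'barrier' and waits for in-flight normal IO (nr_pending) to
	 * drain before resync IO runs, while wait_barrier() makes normal IO
	 * sleep on wait_barrier (counted in nr_waiting) until the barrier
	 * drops; nr_queued counts r10bios parked on retry_list for raid10d.
	 */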

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};

/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	sector_t		sector;	/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 */
	struct {
		struct bio	*bio;
		sector_t	addr;
		int		devnum;
	} devs[0];
};
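
/*
 * Sketch of the allocation, for orientation only: devs[] is sized at
 * allocation time to hold one slot per copy, e.g. roughly
 *
 *	r10_bio = kzalloc(offsetof(struct r10bio, devs[conf->copies]),
 *			  gfp_flags);
 *
 * as done by r10bio_pool_alloc() in raid10.c.
 */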

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to overwrite to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate devs[n].bio pointer.
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
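
/* Purely illustrative usage: code walking devs[] must skip both NULL and
 * these sentinel values before dereferencing, along the lines of
 *
 *	struct bio *bio = r10_bio->devs[slot].bio;
 *	if (!bio || BIO_SPECIAL(bio))
 *		continue;
 */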

/* bits for r10bio.state */
#define	R10BIO_Uptodate	0
#define	R10BIO_IsSync	1
#define	R10BIO_IsRecover 2
#define	R10BIO_Degraded 3
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
#define	R10BIO_ReadError 4
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
#define	R10BIO_MadeGood 5
#define	R10BIO_WriteError 6
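
/* These state bits are manipulated with the usual atomic bitops; purely
 * as illustration, not a quote of raid10.c:
 *
 *	set_bit(R10BIO_Uptodate, &r10_bio->state);
 *	if (test_bit(R10BIO_ReadError, &r10_bio->state))
 *		reschedule_retry(r10_bio);
 */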
#endif