xref: /linux/drivers/md/raid1.h (revision 9f2c9d12bcc53fcb3b787023723754e84d1aef8b)
1ef740c37SChristoph Hellwig #ifndef _RAID1_H
2ef740c37SChristoph Hellwig #define _RAID1_H
3ef740c37SChristoph Hellwig 
typedef struct mirror_info mirror_info_t;

/*
 * Per-device state for one leg of the RAID1 mirror.
 */
struct mirror_info {
	struct md_rdev	*rdev;		/* the underlying member device */
	sector_t	head_position;	/* sector the device head is believed
					 * to be at after the last IO we sent it
					 */
};
10ef740c37SChristoph Hellwig 
11ef740c37SChristoph Hellwig /*
12ef740c37SChristoph Hellwig  * memory pools need a pointer to the mddev, so they can force an unplug
13ef740c37SChristoph Hellwig  * when memory is tight, and a count of the number of drives that the
14ef740c37SChristoph Hellwig  * pool was allocated for, so they know how much to allocate and free.
15ef740c37SChristoph Hellwig  * mddev->raid_disks cannot be used, as it can change while a pool is active
16ef740c37SChristoph Hellwig  * These two datums are stored in a kmalloced struct.
17ef740c37SChristoph Hellwig  */
18ef740c37SChristoph Hellwig 
struct pool_info {
	struct mddev *mddev;	/* the array the pools belong to */
	int	raid_disks;	/* number of drives the pools were allocated
				 * for (mddev->raid_disks may change while a
				 * pool is active, so it is snapshotted here)
				 */
};
23ef740c37SChristoph Hellwig 
/*
 * Per-array private data for the RAID1 personality (mddev->private).
 */
struct r1_private_data_s {
	struct mddev		*mddev;
	mirror_info_t		*mirrors;	/* one entry per raid disk */
	int			raid_disks;

	/* When choosing the best device for a read (read_balance())
	 * we try to keep sequential reads on the same device
	 * using 'last_used' and 'next_seq_sect'.
	 */
	int			last_used;
	sector_t		next_seq_sect;
	/* During resync, read_balancing is only allowed on the part
	 * of the array that has been resynced.  'next_resync' tells us
	 * where that is.
	 */
	sector_t		next_resync;

	spinlock_t		device_lock;

	/* list of 'struct r1bio' that need to be processed by raid1d,
	 * whether to retry a read, writeout a resync or recovery
	 * block, or anything else.
	 */
	struct list_head	retry_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO.  So when either is active, the other has to wait.
	 * See more details description in raid1.c near raise_barrier().
	 * The counters below are all protected by 'resync_lock'; see
	 * raise_barrier()/wait_barrier() in raid1.c for their exact use.
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;

	/* Set to 1 if a full sync is needed, (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted as we expect a read error.
	 */
	int			recovery_disabled;


	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		*r1bio_pool;	/* for r1bio allocation */
	mempool_t		*r1buf_pool;	/* for resync/recovery buffers */

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;


	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};

typedef struct r1_private_data_s conf_t;
96ef740c37SChristoph Hellwig 
97ef740c37SChristoph Hellwig /*
98ef740c37SChristoph Hellwig  * this is our 'private' RAID1 bio.
99ef740c37SChristoph Hellwig  *
100ef740c37SChristoph Hellwig  * it contains information about what kind of IO operations were started
101ef740c37SChristoph Hellwig  * for this RAID1 operation, and about their status:
102ef740c37SChristoph Hellwig  */
103ef740c37SChristoph Hellwig 
104*9f2c9d12SNeilBrown struct r1bio {
105ef740c37SChristoph Hellwig 	atomic_t		remaining; /* 'have we finished' count,
106ef740c37SChristoph Hellwig 					    * used from IRQ handlers
107ef740c37SChristoph Hellwig 					    */
108ef740c37SChristoph Hellwig 	atomic_t		behind_remaining; /* number of write-behind ios remaining
109ef740c37SChristoph Hellwig 						 * in this BehindIO request
110ef740c37SChristoph Hellwig 						 */
111ef740c37SChristoph Hellwig 	sector_t		sector;
112ef740c37SChristoph Hellwig 	int			sectors;
113ef740c37SChristoph Hellwig 	unsigned long		state;
114fd01b88cSNeilBrown 	struct mddev		*mddev;
115ef740c37SChristoph Hellwig 	/*
116ef740c37SChristoph Hellwig 	 * original bio going to /dev/mdx
117ef740c37SChristoph Hellwig 	 */
118ef740c37SChristoph Hellwig 	struct bio		*master_bio;
119ef740c37SChristoph Hellwig 	/*
120ef740c37SChristoph Hellwig 	 * if the IO is in READ direction, then this is where we read
121ef740c37SChristoph Hellwig 	 */
122ef740c37SChristoph Hellwig 	int			read_disk;
123ef740c37SChristoph Hellwig 
124ef740c37SChristoph Hellwig 	struct list_head	retry_list;
125af6d7b76SNeilBrown 	/* Next two are only valid when R1BIO_BehindIO is set */
1262ca68f5eSNeilBrown 	struct bio_vec		*behind_bvecs;
127af6d7b76SNeilBrown 	int			behind_page_count;
128ef740c37SChristoph Hellwig 	/*
129ef740c37SChristoph Hellwig 	 * if the IO is in WRITE direction, then multiple bios are used.
130ef740c37SChristoph Hellwig 	 * We choose the number when they are allocated.
131ef740c37SChristoph Hellwig 	 */
132ef740c37SChristoph Hellwig 	struct bio		*bios[0];
133ef740c37SChristoph Hellwig 	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
134ef740c37SChristoph Hellwig };
135ef740c37SChristoph Hellwig 
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting bios[n] to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for NULL, IO_BLOCKED and IO_MADE_GOOD: any 'bios' entry whose
 * value is <= 2 is a sentinel, not a real bio pointer, and must never
 * be dereferenced.
 */
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
149ef740c37SChristoph Hellwig 
/* bits for r1bio.state */
#define	R1BIO_Uptodate	0
#define	R1BIO_IsSync	1
#define	R1BIO_Degraded	2
#define	R1BIO_BehindIO	3	/* write-behind request: behind_bvecs and
				 * behind_page_count are valid
				 */
/* Set ReadError on bios that experience a readerror so that
 * raid1d knows what to do with them.
 */
#define R1BIO_ReadError 4
/* For write-behind requests, we call bi_end_io when
 * the last non-write-behind device completes, providing
 * any write was successful.  Otherwise we call when
 * any write-behind write succeeds, otherwise we call
 * with failure when last write completes (and all failed).
 * Record that bi_end_io was called with this flag...
 * (no flag is defined for bit 5 in this header)
 */
#define	R1BIO_Returned 6
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag
 */
#define	R1BIO_MadeGood 7
#define	R1BIO_WriteError 8

/* congestion callback, implemented in raid1.c */
extern int md_raid1_congested(struct mddev *mddev, int bits);
175ef740c37SChristoph Hellwig #endif
176