xref: /linux/drivers/md/raid1.h (revision e8096360476689898f038feebf5b352c9ec43a2a)
1ef740c37SChristoph Hellwig #ifndef _RAID1_H
2ef740c37SChristoph Hellwig #define _RAID1_H
3ef740c37SChristoph Hellwig 
/* Per-device state for one leg of the RAID1 mirror set. */
struct mirror_info {
	struct md_rdev	*rdev;		/* underlying member device */
	sector_t	head_position;	/* last submitted position; used by
					 * read_balance() to prefer the
					 * nearest idle device */
};
8ef740c37SChristoph Hellwig 
/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloced struct.
 */
16ef740c37SChristoph Hellwig 
struct pool_info {
	struct mddev *mddev;	/* owning array (see comment above) */
	int	raid_disks;	/* number of drives the pools were sized for;
				 * fixed for the lifetime of the pools even if
				 * mddev->raid_disks changes */
};
21ef740c37SChristoph Hellwig 
/* Private per-array configuration for a RAID1 personality. */
struct r1conf {
	struct mddev		*mddev;
	struct mirror_info		*mirrors;	/* array of raid_disks entries */
	int			raid_disks;

	/* When choosing the best device for a read (read_balance())
	 * we try to keep sequential reads on the same device
	 * using 'last_used' and 'next_seq_sect'
	 */
	int			last_used;
	sector_t		next_seq_sect;
	/* During resync, read_balancing is only allowed on the part
	 * of the array that has been resynced.  'next_resync' tells us
	 * where that is.
	 */
	sector_t		next_resync;

	spinlock_t		device_lock;

	/* list of 'struct r1bio' that need to be processed by raid1d,
	 * whether to retry a read, writeout a resync or recovery
	 * block, or anything else.
	 */
	struct list_head	retry_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO.  So when either is active, the other has to wait.
	 * See more details description in raid1.c near raise_barrier().
	 * The counters below implement that barrier; their exact
	 * semantics live in raid1.c (raise_barrier()/wait_barrier()).
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;	/* protects the barrier counters */
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;

	/* Set to 1 if a full sync is needed, (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted as we expect a read error.
	 */
	int			recovery_disabled;


	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		*r1bio_pool;	/* r1bio allocation for normal IO */
	mempool_t		*r1buf_pool;	/* buffers for resync/recovery IO */

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;


	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
92ef740c37SChristoph Hellwig 
93ef740c37SChristoph Hellwig /*
94ef740c37SChristoph Hellwig  * this is our 'private' RAID1 bio.
95ef740c37SChristoph Hellwig  *
96ef740c37SChristoph Hellwig  * it contains information about what kind of IO operations were started
97ef740c37SChristoph Hellwig  * for this RAID1 operation, and about their status:
98ef740c37SChristoph Hellwig  */
99ef740c37SChristoph Hellwig 
1009f2c9d12SNeilBrown struct r1bio {
101ef740c37SChristoph Hellwig 	atomic_t		remaining; /* 'have we finished' count,
102ef740c37SChristoph Hellwig 					    * used from IRQ handlers
103ef740c37SChristoph Hellwig 					    */
104ef740c37SChristoph Hellwig 	atomic_t		behind_remaining; /* number of write-behind ios remaining
105ef740c37SChristoph Hellwig 						 * in this BehindIO request
106ef740c37SChristoph Hellwig 						 */
107ef740c37SChristoph Hellwig 	sector_t		sector;
108ef740c37SChristoph Hellwig 	int			sectors;
109ef740c37SChristoph Hellwig 	unsigned long		state;
110fd01b88cSNeilBrown 	struct mddev		*mddev;
111ef740c37SChristoph Hellwig 	/*
112ef740c37SChristoph Hellwig 	 * original bio going to /dev/mdx
113ef740c37SChristoph Hellwig 	 */
114ef740c37SChristoph Hellwig 	struct bio		*master_bio;
115ef740c37SChristoph Hellwig 	/*
116ef740c37SChristoph Hellwig 	 * if the IO is in READ direction, then this is where we read
117ef740c37SChristoph Hellwig 	 */
118ef740c37SChristoph Hellwig 	int			read_disk;
119ef740c37SChristoph Hellwig 
120ef740c37SChristoph Hellwig 	struct list_head	retry_list;
121af6d7b76SNeilBrown 	/* Next two are only valid when R1BIO_BehindIO is set */
1222ca68f5eSNeilBrown 	struct bio_vec		*behind_bvecs;
123af6d7b76SNeilBrown 	int			behind_page_count;
124ef740c37SChristoph Hellwig 	/*
125ef740c37SChristoph Hellwig 	 * if the IO is in WRITE direction, then multiple bios are used.
126ef740c37SChristoph Hellwig 	 * We choose the number when they are allocated.
127ef740c37SChristoph Hellwig 	 */
128ef740c37SChristoph Hellwig 	struct bio		*bios[0];
129ef740c37SChristoph Hellwig 	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
130ef740c37SChristoph Hellwig };
131ef740c37SChristoph Hellwig 
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting bios[n] to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for the IO_BLOCKED/IO_MADE_GOOD sentinels (and for NULL); such
 * entries are not real bio pointers and must not be dereferenced.
 * The argument must be parenthesized so the cast applies to the whole
 * expression, not just its first token (e.g. for a ternary argument).
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)
145ef740c37SChristoph Hellwig 
/* bits for r1bio.state */
#define	R1BIO_Uptodate	0
#define	R1BIO_IsSync	1
#define	R1BIO_Degraded	2
#define	R1BIO_BehindIO	3
/* Set ReadError on bios that experience a readerror so that
 * raid1d knows what to do with them.
 */
#define R1BIO_ReadError 4
/* For write-behind requests, we call bi_end_io when
 * the last non-write-behind device completes, providing
 * any write was successful.  Otherwise we call when
 * any write-behind write succeeds, otherwise we call
 * with failure when last write completes (and all failed).
 * Record that bi_end_io was called with this flag...
 * NOTE(review): bit 5 is currently unused.
 */
#define	R1BIO_Returned 6
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag
 */
#define	R1BIO_MadeGood 7
#define	R1BIO_WriteError 8	/* NOTE(review): handling lives in raid1.c */
168ef740c37SChristoph Hellwig 
169fd01b88cSNeilBrown extern int md_raid1_congested(struct mddev *mddev, int bits);
1701ed7242eSJonathan Brassow 
171ef740c37SChristoph Hellwig #endif
172