/* drivers/md/raid1.h */
#ifndef _RAID1_H
#define _RAID1_H

typedef struct mirror_info mirror_info_t;

struct mirror_info {
	mdk_rdev_t	*rdev;		/* the member device of this mirror */
	sector_t	head_position;	/* last known head position, used to
					 * prefer the nearest mirror for reads
					 */
};

/*
 * Memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two values are stored in a kmalloc()ed struct.
 */

struct pool_info {
	mddev_t *mddev;
	int	raid_disks;
};
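
/*
 * Illustration only, not part of this header: the mempool allocation
 * callback (r1bio_pool_alloc() in raid1.c) receives the pool_info as
 * its 'data' argument, sizes each r1bio for raid_disks bios, and
 * unplugs the member devices when an allocation fails under memory
 * pressure.  A sketch; details may differ from raid1.c:
 *
 *	static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 *	{
 *		struct pool_info *pi = data;
 *		r1bio_t *r1_bio;
 *
 *		r1_bio = kzalloc(offsetof(r1bio_t, bios[pi->raid_disks]),
 *				 gfp_flags);
 *		if (!r1_bio && pi->mddev)
 *			unplug_slaves(pi->mddev);
 *		return r1_bio;
 *	}
 */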

typedef struct r1bio_s r1bio_t;

struct r1_private_data_s {
	mddev_t			*mddev;
	mirror_info_t		*mirrors;
	int			raid_disks;
	int			last_used;	/* disk most recently read from */
	sector_t		next_seq_sect;	/* sector that would continue the
						 * most recent sequential read */
	spinlock_t		device_lock;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	/* queue of writes that have been unplugged */
	struct bio_list		flushing_bio_list;

	/* for use when syncing mirrors: */

	spinlock_t		resync_lock;
	int			nr_pending;	/* normal IO requests in flight */
	int			nr_waiting;	/* requests waiting for the
						 * barrier to be released */
	int			nr_queued;	/* requests parked on retry_list */
	int			barrier;	/* count of raised resync barriers */
	sector_t		next_resync;
	int			fullsync;  /* set to 1 if a full sync is needed
					    * (a fresh device was added).
					    * Cleared when a sync completes.
					    */

	wait_queue_head_t	wait_barrier;

	struct pool_info	*poolinfo;

	struct page		*tmppage;

	mempool_t *r1bio_pool;	/* r1bio heads for normal IO */
	mempool_t *r1buf_pool;	/* r1bios with data pages, for resync */

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s	*thread;
};

typedef struct r1_private_data_s conf_t;

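/*
 * Illustration only: a sketch of the exclusion scheme raid1.c builds
 * on the fields above (wait_barrier/allow_barrier/raise_barrier are
 * the raid1.c helpers; details may differ).  Normal IO and resync
 * exclude each other via 'barrier' and 'nr_pending', both protected
 * by resync_lock:
 *
 *	wait_barrier(conf);	// sleeps while conf->barrier is raised,
 *				// then increments conf->nr_pending
 *	...submit normal IO...
 *	allow_barrier(conf);	// drops nr_pending, wakes any waiters
 *
 * raise_barrier() increments conf->barrier and waits for nr_pending
 * to drain to zero before resync IO is issued.
 */
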
/*
 * This is our 'private' RAID1 bio.
 *
 * It contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio_s {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	atomic_t		behind_remaining; /* number of write-behind ios remaining
						 * in this BehindIO request
						 */
	sector_t		sector;
	int			sectors;
	unsigned long		state;
	mddev_t			*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;
	struct bitmap_update	*bitmap_update;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[0];
	/* DO NOT PUT ANY NEW FIELDS HERE - the bios array is allocated
	 * contiguously with this struct */
};
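
/*
 * Illustration only (a sketch of what make_request() in raid1.c does
 * for a WRITE; details differ): one clone of master_bio per usable
 * mirror is stored in bios[], and 'remaining' counts them down as
 * they complete:
 *
 *	for (i = 0; i < conf->raid_disks; i++) {
 *		if (!r1_bio->bios[i])
 *			continue;
 *		mbio = bio_clone(bio, GFP_NOIO);
 *		r1_bio->bios[i] = mbio;
 *		atomic_inc(&r1_bio->remaining);
 *		...
 *	}
 */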

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)

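/*
 * Usage sketch (assumed, after the fashion of read_balance() in
 * raid1.c): slots holding IO_BLOCKED are skipped when choosing a
 * device to retry a read on:
 *
 *	if (r1_bio->bios[disk] == IO_BLOCKED ||
 *	    rdev == NULL || !test_bit(In_sync, &rdev->flags))
 *		continue;
 */
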
/* bits for r1bio.state */
#define	R1BIO_Uptodate	0
#define	R1BIO_IsSync	1
#define	R1BIO_Degraded	2
#define	R1BIO_BehindIO	3
#define	R1BIO_Barrier	4
#define R1BIO_BarrierRetry 5
/* For write-behind requests, we call bi_end_io when the last
 * non-write-behind device completes, provided at least one write
 * was successful.  Failing that, we call it with success when any
 * write-behind write succeeds, and with failure when the last write
 * completes (i.e. all writes failed).  This flag records that
 * bi_end_io has been called.
 */
#define	R1BIO_Returned 6
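
/*
 * The R1BIO_* values are bit numbers in r1bio_s.state, manipulated
 * with the usual atomic bitops, e.g. (illustrative):
 *
 *	set_bit(R1BIO_Uptodate, &r1_bio->state);
 *	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
 *		atomic_inc(&r1_bio->behind_remaining);
 */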

#endif