/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#ifndef BTRFS_RAID56_H
#define BTRFS_RAID56_H

#include <linux/workqueue.h>
#include "volumes.h"

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_io_context *bioc;

	/*
	 * While we're doing RMW on a stripe we put it into a hash table so we
	 * can lock the stripe and merge more rbios into it.
	 */
	struct list_head hash_list;

	/* LRU list for the stripe cache */
	struct list_head stripe_cache;

	/* For scheduling work in the helper threads */
	struct work_struct work;

	/*
	 * bio_list and bio_list_lock are used to add more bios into the stripe
	 * in hopes of avoiding the full RMW
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/*
	 * Also protected by the bio_list_lock, the plug list is used by the
	 * plugging code to collect partial bios while plugged.  The stripe
	 * locking code also uses it to hand off the stripe lock to the next
	 * pending IO.
	 */
	struct list_head plug_list;

	/* Flags that tell us if it is safe to merge with this rbio. */
	unsigned long flags;

	/*
	 * Set if we're doing a parity rebuild for a read from higher up, which
	 * is handled differently from a parity rebuild as part of RMW.
	 */
	enum btrfs_rbio_ops operation;

	/* How many pages there are for the full stripe including P/Q */
	u16 nr_pages;

	/* How many sectors there are for the full stripe including P/Q */
	u16 nr_sectors;

	/* Number of data stripes (no P/Q) */
	u8 nr_data;

	/* Number of all stripes (including P/Q) */
	u8 real_stripes;

	/* How many pages there are for each stripe */
	u8 stripe_npages;

	/* How many sectors there are for each stripe */
	u8 stripe_nsectors;

	/* First bad stripe, -1 means no corruption */
	s8 faila;

	/* Second bad stripe (for RAID6 use) */
	s8 failb;

	/* Stripe number that we're scrubbing */
	u8 scrubp;

	/*
	 * Size of all the bios in the bio_list.  This helps us decide if the
	 * rbio maps to a full stripe or not.
	 */
	int bio_list_bytes;

	/* Bios counted against the fs-wide bio counter (generic I/O) */
	int generic_bio_cnt;

	/* Reference count of this rbio */
	refcount_t refs;

	/* Number of bios still in flight for the current phase */
	atomic_t stripes_pending;

	/* Number of faults hit while servicing this rbio */
	atomic_t error;

	/* For handling bio completions out of the end-io context */
	struct work_struct end_io_work;

	/* Bitmap to record which horizontal stripe has data */
	unsigned long dbitmap;

	/* Allocated with stripe_nsectors-many bits for finish_*() calls */
	unsigned long finish_pbitmap;

	/*
	 * The following are arrays of pointers.  We allocate the rbio big
	 * enough to hold them all and set up their locations when the rbio is
	 * allocated (see the illustrative sketch after this struct).
	 */

	/*
	 * Pointers to pages that we allocated for reading/writing stripes
	 * directly from the disk (including P/Q).
	 */
	struct page **stripe_pages;

	/* Pointers to the sectors in the bio_list, for faster lookup */
	struct sector_ptr *bio_sectors;

	/*
	 * For subpage support, we need to map each sector to the
	 * stripe_pages above.
	 */
	struct sector_ptr *stripe_sectors;

	/* Allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;
};
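
/*
 * Illustrative sketch of the trailing-array layout described above.  The
 * rbio is allocated with enough room behind it for all four pointer
 * arrays, which are then aimed at their slices of that tail.  This is a
 * hedged sketch of what the allocator in raid56.c does; num_pages and
 * num_sectors are placeholders, not fields of this struct:
 *
 *	rbio = kzalloc(sizeof(*rbio) +
 *		       sizeof(struct page *) * num_pages +
 *		       sizeof(struct sector_ptr) * num_sectors +
 *		       sizeof(struct sector_ptr) * num_sectors +
 *		       sizeof(void *) * real_stripes,
 *		       GFP_NOFS);
 *	void *p = rbio + 1;
 *
 *	rbio->stripe_pages = p;
 *	p += sizeof(struct page *) * num_pages;
 *	rbio->bio_sectors = p;
 *	p += sizeof(struct sector_ptr) * num_sectors;
 *	rbio->stripe_sectors = p;
 *	p += sizeof(struct sector_ptr) * num_sectors;
 *	rbio->finish_pointers = p;
 */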

/*
 * For trace event usage only.  Records useful debug info for each bio
 * submitted by RAID56 to each physical device.
 *
 * Whether the field is signed or not, (-1) always indicates that we could
 * not determine the proper stripe number.
 */
struct raid56_bio_trace_info {
	u64 devid;

	/* The offset inside the stripe. (<= STRIPE_LEN) */
	u32 offset;

	/*
	 * Stripe number.
	 * 0 is the first data stripe, and nr_data for P stripe,
	 * nr_data + 1 for Q stripe.
	 * A value >= real_stripes means we could not determine the stripe
	 * number (the (-1) case described above).
	 */
	u8 stripe_nr;
};

static inline int nr_data_stripes(const struct map_lookup *map)
{
	return map->num_stripes - btrfs_nr_parity_stripes(map->type);
}
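
/*
 * Worked example: a RAID6 chunk striped across 6 devices has
 * btrfs_nr_parity_stripes() == 2, so nr_data_stripes() returns 6 - 2 = 4.
 * The same 6 devices as RAID5 would give 1 parity and 5 data stripes.
 */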

#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) ||		\
			     ((x) == RAID6_Q_STRIPE))
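
/*
 * These sentinels are stored in the io_context's raid_map[], which
 * otherwise holds the logical start of each stripe, letting callers
 * separate data stripes from parity.  A minimal sketch, assuming a
 * hypothetical handle_data_stripe() helper:
 *
 *	for (i = 0; i < rbio->real_stripes; i++) {
 *		if (is_parity_stripe(rbio->bioc->raid_map[i]))
 *			continue;
 *		handle_data_stripe(rbio, i);
 *	}
 */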

struct btrfs_device;

void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num, bool generic_io);
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
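
/*
 * Hedged usage sketch: the read path hands a failed full-stripe read to
 * raid56_parity_recover(), e.g.
 *
 *	raid56_parity_recover(bio, bioc, mirror_num, true);
 *
 * where generic_io is set for bios from the generic I/O path and clear
 * for internal users (such as scrub) that do their own bio accounting.
 */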

void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    unsigned int pgoff, u64 logical);

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
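
/*
 * Typical scrub flow, as an illustrative sketch (the real sequence in
 * scrub.c carries more setup and error handling; page, pgoff and logical
 * are placeholders for the sectors being checked):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev,
 *					      &dbitmap, stripe_nsectors);
 *	if (!rbio)
 *		return -ENOMEM;
 *	raid56_add_scrub_pages(rbio, page, pgoff, logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 */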

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
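
/*
 * Sketch of reconstructing a full stripe when one device is missing
 * entirely (illustrative; scrub uses this pair to rebuild data it cannot
 * read from the missing device):
 *
 *	rbio = raid56_alloc_missing_rbio(bio, bioc);
 *	if (!rbio)
 *		return -ENOMEM;
 *	raid56_submit_missing_rbio(rbio);
 */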

int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);

#endif