xref: /freebsd/sys/geom/raid3/g_raid3.h (revision d056fa046c6a91b90cd98165face0e42a33a5173)
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_RAID3_H_
#define	_G_RAID3_H_

#include <sys/endian.h>
#include <sys/md5.h>

#define	G_RAID3_CLASS_NAME	"RAID3"

#define	G_RAID3_MAGIC		"GEOM::RAID3"
/*
 * Version history:
 * 0 - Initial version number.
 * 1 - Added 'round-robin reading' algorithm.
 * 2 - Added 'verify reading' algorithm.
 * 3 - Added md_genid field to metadata.
 * 4 - Added md_provsize field to metadata.
 */
#define	G_RAID3_VERSION		4

#define	G_RAID3_DISK_FLAG_DIRTY		0x0000000000000001ULL
#define	G_RAID3_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
#define	G_RAID3_DISK_FLAG_FORCE_SYNC	0x0000000000000004ULL
#define	G_RAID3_DISK_FLAG_HARDCODED	0x0000000000000008ULL
#define	G_RAID3_DISK_FLAG_BROKEN	0x0000000000000010ULL
#define	G_RAID3_DISK_FLAG_MASK		(G_RAID3_DISK_FLAG_DIRTY |	\
					 G_RAID3_DISK_FLAG_SYNCHRONIZING | \
					 G_RAID3_DISK_FLAG_FORCE_SYNC)

#define	G_RAID3_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_RAID3_DEVICE_FLAG_ROUND_ROBIN	0x0000000000000002ULL
#define	G_RAID3_DEVICE_FLAG_VERIFY	0x0000000000000004ULL
#define	G_RAID3_DEVICE_FLAG_MASK	(G_RAID3_DEVICE_FLAG_NOAUTOSYNC | \
					 G_RAID3_DEVICE_FLAG_ROUND_ROBIN | \
					 G_RAID3_DEVICE_FLAG_VERIFY)

#ifdef _KERNEL
extern u_int g_raid3_debug;

#define	G_RAID3_DEBUG(lvl, ...)	do {					\
	if (g_raid3_debug >= (lvl)) {					\
		printf("GEOM_RAID3");					\
		if (g_raid3_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf("\n");						\
	}								\
} while (0)
#define	G_RAID3_LOGREQ(lvl, bp, ...)	do {				\
	if (g_raid3_debug >= (lvl)) {					\
		printf("GEOM_RAID3");					\
		if (g_raid3_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf(" ");						\
		g_print_bio(bp);					\
		printf("\n");						\
	}								\
} while (0)

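/*
 * Typical usage in g_raid3.c (illustrative only; the exact messages differ):
 *
 *	G_RAID3_DEBUG(1, "Device %s created.", sc->sc_name);
 *	G_RAID3_LOGREQ(3, bp, "Request delayed.");
 *
 * A message is printed only when g_raid3_debug is at least 'lvl';
 * G_RAID3_LOGREQ() additionally describes the bio via g_print_bio().
 */
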
#define	G_RAID3_BIO_CFLAG_REGULAR	0x01
#define	G_RAID3_BIO_CFLAG_SYNC		0x02
#define	G_RAID3_BIO_CFLAG_PARITY	0x04
#define	G_RAID3_BIO_CFLAG_NODISK	0x08
#define	G_RAID3_BIO_CFLAG_REGSYNC	0x10
#define	G_RAID3_BIO_CFLAG_MASK		(G_RAID3_BIO_CFLAG_REGULAR |	\
					 G_RAID3_BIO_CFLAG_SYNC |	\
					 G_RAID3_BIO_CFLAG_PARITY |	\
					 G_RAID3_BIO_CFLAG_NODISK |	\
					 G_RAID3_BIO_CFLAG_REGSYNC)

#define	G_RAID3_BIO_PFLAG_DEGRADED	0x01
#define	G_RAID3_BIO_PFLAG_NOPARITY	0x02
#define	G_RAID3_BIO_PFLAG_VERIFY	0x04
#define	G_RAID3_BIO_PFLAG_MASK		(G_RAID3_BIO_PFLAG_DEGRADED |	\
					 G_RAID3_BIO_PFLAG_NOPARITY |	\
					 G_RAID3_BIO_PFLAG_VERIFY)
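
/*
 * As used by g_raid3.c, the CFLAGs are kept in the bio_cflags field of the
 * requests cloned and sent down to the component disks, while the PFLAGs
 * are kept in bio_pflags of the parent (original) request received from
 * above.  Both fields are reserved for private use by GEOM classes.
 */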

/*
 * Information needed to synchronize a single component disk.
 */
struct g_raid3_disk_sync {
	struct g_consumer *ds_consumer;	/* Consumer connected to our device. */
	off_t		  ds_offset;	/* Offset of next request to send. */
	off_t		  ds_offset_done; /* Offset of already synchronized
					   region. */
	off_t		  ds_resync;	/* Resynchronize from this offset. */
	u_int		  ds_syncid;	/* Disk's synchronization ID. */
	u_int		  ds_inflight;	/* Number of in-flight sync requests. */
	struct bio	**ds_bios;	/* BIOs for synchronization I/O. */
};

/*
 * Information needed for device-wide synchronization.
 */
struct g_raid3_device_sync {
	struct g_geom	*ds_geom;	/* Synchronization geom. */
};

#define	G_RAID3_DISK_STATE_NODISK		0
#define	G_RAID3_DISK_STATE_NONE			1
#define	G_RAID3_DISK_STATE_NEW			2
#define	G_RAID3_DISK_STATE_ACTIVE		3
#define	G_RAID3_DISK_STATE_STALE		4
#define	G_RAID3_DISK_STATE_SYNCHRONIZING	5
#define	G_RAID3_DISK_STATE_DISCONNECTED		6
#define	G_RAID3_DISK_STATE_DESTROY		7
struct g_raid3_disk {
	u_int		 d_no;		/* Disk number. */
	struct g_consumer *d_consumer;	/* Consumer. */
	struct g_raid3_softc *d_softc;	/* Back-pointer to softc. */
	int		 d_state;	/* Disk state. */
	uint64_t	 d_flags;	/* Additional flags. */
	u_int		 d_genid;	/* Disk's generation ID. */
	struct g_raid3_disk_sync d_sync; /* Sync information. */
	LIST_ENTRY(g_raid3_disk) d_next;
};
#define	d_name	d_consumer->provider->name

#define	G_RAID3_EVENT_DONTWAIT	0x1
#define	G_RAID3_EVENT_WAIT	0x2
#define	G_RAID3_EVENT_DEVICE	0x4
#define	G_RAID3_EVENT_DONE	0x8
struct g_raid3_event {
	struct g_raid3_disk	*e_disk;
	int			 e_state;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_raid3_event) e_next;
};

#define	G_RAID3_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
#define	G_RAID3_DEVICE_FLAG_WAIT	0x0200000000000000ULL
#define	G_RAID3_DEVICE_FLAG_DESTROYING	0x0400000000000000ULL

#define	G_RAID3_DEVICE_STATE_STARTING		0
#define	G_RAID3_DEVICE_STATE_DEGRADED		1
#define	G_RAID3_DEVICE_STATE_COMPLETE		2

/* Bump syncid on first write. */
#define	G_RAID3_BUMP_SYNCID	0x1
/* Bump genid immediately. */
#define	G_RAID3_BUMP_GENID	0x2

enum g_raid3_zones {
	G_RAID3_ZONE_64K,
	G_RAID3_ZONE_16K,
	G_RAID3_ZONE_4K,
	G_RAID3_NUM_ZONES
};

static __inline enum g_raid3_zones
g_raid3_zone(size_t nbytes)
{
	if (nbytes > 16384)
		return (G_RAID3_ZONE_64K);
	else if (nbytes > 4096)
		return (G_RAID3_ZONE_16K);
	else
		return (G_RAID3_ZONE_4K);
}
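
/*
 * The returned index selects one of the per-device UMA zones kept in the
 * sc_zones[] array of struct g_raid3_softc below, i.e. the smallest of the
 * 4k/16k/64k size classes that fits the request; for example,
 * g_raid3_zone(32 * 1024) evaluates to G_RAID3_ZONE_64K.
 */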

struct g_raid3_softc {
	u_int		sc_state;	/* Device state. */
	uint64_t	sc_mediasize;	/* Device size. */
	uint32_t	sc_sectorsize;	/* Sector size. */
	uint64_t	sc_flags;	/* Additional flags. */

	struct g_geom	*sc_geom;
	struct g_provider *sc_provider;

	uint32_t	sc_id;		/* Device unique ID. */

	struct sx	 sc_lock;
	struct bio_queue_head sc_queue;
	struct mtx	 sc_queue_mtx;
	struct proc	*sc_worker;
	struct bio_queue_head sc_regular_delayed; /* Delayed I/O requests due
						     to collision with sync
						     requests. */
	struct bio_queue_head sc_inflight; /* In-flight regular write
					      requests. */
	struct bio_queue_head sc_sync_delayed; /* Delayed sync requests due
						  to collision with regular
						  requests. */

	struct g_raid3_disk *sc_disks;
	u_int		sc_ndisks;	/* Number of disks. */
	u_int		sc_round_robin;
	struct g_raid3_disk *sc_syncdisk;

	struct g_raid3_zone {
		uma_zone_t	sz_zone;
		size_t		sz_inuse;
		size_t		sz_max;
		u_int		sz_requested;
		u_int		sz_failed;
	} sc_zones[G_RAID3_NUM_ZONES];

	u_int		sc_genid;	/* Generation ID. */
	u_int		sc_syncid;	/* Synchronization ID. */
	int		sc_bump_id;
	struct g_raid3_device_sync sc_sync;
	int		sc_idle;	/* DIRTY flags removed. */
	time_t		sc_last_write;
	u_int		sc_writes;

	TAILQ_HEAD(, g_raid3_event) sc_events;
	struct mtx	sc_events_mtx;

	struct callout	sc_callout;

	struct root_hold_token *sc_rootmount;
};
#define	sc_name	sc_geom->name

const char *g_raid3_get_diskname(struct g_raid3_disk *disk);
u_int g_raid3_ndisks(struct g_raid3_softc *sc, int state);
#define	G_RAID3_DESTROY_SOFT	0
#define	G_RAID3_DESTROY_DELAYED	1
#define	G_RAID3_DESTROY_HARD	2
int g_raid3_destroy(struct g_raid3_softc *sc, int how);
int g_raid3_event_send(void *arg, int state, int flags);
struct g_raid3_metadata;
int g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md);
int g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md);
void g_raid3_fill_metadata(struct g_raid3_disk *disk,
    struct g_raid3_metadata *md);
int g_raid3_clear_metadata(struct g_raid3_disk *disk);
void g_raid3_update_metadata(struct g_raid3_disk *disk);

g_ctl_req_t g_raid3_config;
#endif	/* _KERNEL */

struct g_raid3_metadata {
	char		md_magic[16];	/* Magic value. */
	uint32_t	md_version;	/* Version number. */
	char		md_name[16];	/* Device name. */
	uint32_t	md_id;		/* Device unique ID. */
	uint16_t	md_no;		/* Component number. */
	uint16_t	md_all;		/* Number of disks in device. */
	uint32_t	md_genid;	/* Generation ID. */
	uint32_t	md_syncid;	/* Synchronization ID. */
	uint64_t	md_mediasize;	/* Size of whole device. */
	uint32_t	md_sectorsize;	/* Sector size. */
	uint64_t	md_sync_offset;	/* Synchronized offset. */
	uint64_t	md_mflags;	/* Additional device flags. */
	uint64_t	md_dflags;	/* Additional disk flags. */
	char		md_provider[16]; /* Hardcoded provider. */
	uint64_t	md_provsize;	/* Provider's size. */
	u_char		md_hash[16];	/* MD5 hash. */
};
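
/*
 * On-disk layout of version 4 metadata, as produced by
 * raid3_metadata_encode() below.  All multi-byte fields are little-endian.
 *
 *	offset	length	field
 *	     0	    16	md_magic
 *	    16	     4	md_version
 *	    20	    16	md_name
 *	    36	     4	md_id
 *	    40	     2	md_no
 *	    42	     2	md_all
 *	    44	     4	md_genid
 *	    48	     4	md_syncid
 *	    52	     8	md_mediasize
 *	    60	     4	md_sectorsize
 *	    64	     8	md_sync_offset
 *	    72	     8	md_mflags
 *	    80	     8	md_dflags
 *	    88	    16	md_provider
 *	   104	     8	md_provsize
 *	   112	    16	md_hash (MD5 of the preceding 112 bytes)
 */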
static __inline void
raid3_metadata_encode(struct g_raid3_metadata *md, u_char *data)
{
	MD5_CTX ctx;

	bcopy(md->md_magic, data, 16);
	le32enc(data + 16, md->md_version);
	bcopy(md->md_name, data + 20, 16);
	le32enc(data + 36, md->md_id);
	le16enc(data + 40, md->md_no);
	le16enc(data + 42, md->md_all);
	le32enc(data + 44, md->md_genid);
	le32enc(data + 48, md->md_syncid);
	le64enc(data + 52, md->md_mediasize);
	le32enc(data + 60, md->md_sectorsize);
	le64enc(data + 64, md->md_sync_offset);
	le64enc(data + 72, md->md_mflags);
	le64enc(data + 80, md->md_dflags);
	bcopy(md->md_provider, data + 88, 16);
	le64enc(data + 104, md->md_provsize);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 112);
	MD5Final(md->md_hash, &ctx);
	bcopy(md->md_hash, data + 112, 16);
}
static __inline int
raid3_metadata_decode_v0v1v2(const u_char *data, struct g_raid3_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_id = le32dec(data + 36);
	md->md_no = le16dec(data + 40);
	md->md_all = le16dec(data + 42);
	md->md_syncid = le32dec(data + 44);
	md->md_mediasize = le64dec(data + 48);
	md->md_sectorsize = le32dec(data + 56);
	md->md_sync_offset = le64dec(data + 60);
	md->md_mflags = le64dec(data + 68);
	md->md_dflags = le64dec(data + 76);
	bcopy(data + 84, md->md_provider, 16);
	bcopy(data + 100, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 100);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 100, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_genid = 0;
	md->md_provsize = 0;

	return (0);
}
static __inline int
raid3_metadata_decode_v3(const u_char *data, struct g_raid3_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_id = le32dec(data + 36);
	md->md_no = le16dec(data + 40);
	md->md_all = le16dec(data + 42);
	md->md_genid = le32dec(data + 44);
	md->md_syncid = le32dec(data + 48);
	md->md_mediasize = le64dec(data + 52);
	md->md_sectorsize = le32dec(data + 60);
	md->md_sync_offset = le64dec(data + 64);
	md->md_mflags = le64dec(data + 72);
	md->md_dflags = le64dec(data + 80);
	bcopy(data + 88, md->md_provider, 16);
	bcopy(data + 104, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 104);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 104, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_provsize = 0;

	return (0);
}
static __inline int
raid3_metadata_decode_v4(const u_char *data, struct g_raid3_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_id = le32dec(data + 36);
	md->md_no = le16dec(data + 40);
	md->md_all = le16dec(data + 42);
	md->md_genid = le32dec(data + 44);
	md->md_syncid = le32dec(data + 48);
	md->md_mediasize = le64dec(data + 52);
	md->md_sectorsize = le32dec(data + 60);
	md->md_sync_offset = le64dec(data + 64);
	md->md_mflags = le64dec(data + 72);
	md->md_dflags = le64dec(data + 80);
	bcopy(data + 88, md->md_provider, 16);
	md->md_provsize = le64dec(data + 104);
	bcopy(data + 112, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 112);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 112, 16) != 0)
		return (EINVAL);
	return (0);
}
static __inline int
raid3_metadata_decode(const u_char *data, struct g_raid3_metadata *md)
{
	int error;

	bcopy(data, md->md_magic, 16);
	md->md_version = le32dec(data + 16);
	switch (md->md_version) {
	case 0:
	case 1:
	case 2:
		error = raid3_metadata_decode_v0v1v2(data, md);
		break;
	case 3:
		error = raid3_metadata_decode_v3(data, md);
		break;
	case 4:
		error = raid3_metadata_decode_v4(data, md);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

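/*
 * Illustrative only (not part of this header): the metadata lives in the
 * last sector of every component, so a userland reader would typically do
 * something along these lines, assuming 512-byte sectors and that 'fd' and
 * 'mediasize' describe the component provider:
 *
 *	u_char sector[512];
 *	struct g_raid3_metadata md;
 *
 *	if (pread(fd, sector, sizeof(sector), mediasize - 512) == 512 &&
 *	    raid3_metadata_decode(sector, &md) == 0)
 *		raid3_metadata_dump(&md);
 */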
static __inline void
raid3_metadata_dump(const struct g_raid3_metadata *md)
{
	static const char hex[] = "0123456789abcdef";
	char hash[16 * 2 + 1];
	u_int i;

	printf("     magic: %s\n", md->md_magic);
	printf("   version: %u\n", (u_int)md->md_version);
	printf("      name: %s\n", md->md_name);
	printf("        id: %u\n", (u_int)md->md_id);
	printf("        no: %u\n", (u_int)md->md_no);
	printf("       all: %u\n", (u_int)md->md_all);
	printf("     genid: %u\n", (u_int)md->md_genid);
	printf("    syncid: %u\n", (u_int)md->md_syncid);
	printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
	printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
	printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
	printf("    mflags:");
	if (md->md_mflags == 0)
		printf(" NONE");
	else {
		if ((md->md_mflags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0)
			printf(" NOAUTOSYNC");
		if ((md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0)
			printf(" ROUND-ROBIN");
		if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0)
			printf(" VERIFY");
	}
	printf("\n");
	printf("    dflags:");
	if (md->md_dflags == 0)
		printf(" NONE");
	else {
		if ((md->md_dflags & G_RAID3_DISK_FLAG_DIRTY) != 0)
			printf(" DIRTY");
		if ((md->md_dflags & G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0)
			printf(" SYNCHRONIZING");
		if ((md->md_dflags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0)
			printf(" FORCE_SYNC");
	}
	printf("\n");
	printf("hcprovider: %s\n", md->md_provider);
	printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
	bzero(hash, sizeof(hash));
	for (i = 0; i < 16; i++) {
		hash[i * 2] = hex[md->md_hash[i] >> 4];
		hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
	}
	printf("  MD5 hash: %s\n", hash);
}
#endif	/* !_G_RAID3_H_ */