xref: /freebsd/sys/geom/mirror/g_mirror.h (revision 5dae51da3da0cc94d17bd67b308fad304ebec7e0)
1 /*-
2  * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #ifndef	_G_MIRROR_H_
30 #define	_G_MIRROR_H_
31 
32 #include <sys/endian.h>
33 #include <sys/md5.h>
34 
35 #define	G_MIRROR_CLASS_NAME	"MIRROR"
36 
37 #define	G_MIRROR_MAGIC		"GEOM::MIRROR"
38 /*
39  * Version history:
40  * 0 - Initial version number.
41  * 1 - Added 'prefer' balance algorithm.
42  * 2 - Added md_genid field to metadata.
43  * 3 - Added md_provsize field to metadata.
44  * 4 - Added 'no failure synchronization' flag.
45  */
46 #define	G_MIRROR_VERSION	4
47 
48 #define	G_MIRROR_BALANCE_NONE		0
49 #define	G_MIRROR_BALANCE_ROUND_ROBIN	1
50 #define	G_MIRROR_BALANCE_LOAD		2
51 #define	G_MIRROR_BALANCE_SPLIT		3
52 #define	G_MIRROR_BALANCE_PREFER		4
53 #define	G_MIRROR_BALANCE_MIN		G_MIRROR_BALANCE_NONE
54 #define	G_MIRROR_BALANCE_MAX		G_MIRROR_BALANCE_PREFER
55 
56 #define	G_MIRROR_DISK_FLAG_DIRTY		0x0000000000000001ULL
57 #define	G_MIRROR_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
58 #define	G_MIRROR_DISK_FLAG_FORCE_SYNC		0x0000000000000004ULL
59 #define	G_MIRROR_DISK_FLAG_INACTIVE		0x0000000000000008ULL
60 #define	G_MIRROR_DISK_FLAG_HARDCODED		0x0000000000000010ULL
61 #define	G_MIRROR_DISK_FLAG_BROKEN		0x0000000000000020ULL
62 #define	G_MIRROR_DISK_FLAG_CANDELETE		0x0000000000000040ULL
63 #define	G_MIRROR_DISK_FLAG_MASK		(G_MIRROR_DISK_FLAG_DIRTY |	\
64 					 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
65 					 G_MIRROR_DISK_FLAG_FORCE_SYNC | \
66 					 G_MIRROR_DISK_FLAG_INACTIVE | \
67 					 G_MIRROR_DISK_FLAG_CANDELETE)
68 
69 #define	G_MIRROR_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
70 #define	G_MIRROR_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL
71 #define	G_MIRROR_DEVICE_FLAG_MASK	(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
72 					 G_MIRROR_DEVICE_FLAG_NOFAILSYNC)
73 
74 #ifdef _KERNEL
75 extern u_int g_mirror_debug;
76 
77 #define	G_MIRROR_DEBUG(lvl, ...)	do {				\
78 	if (g_mirror_debug >= (lvl)) {					\
79 		printf("GEOM_MIRROR");					\
80 		if (g_mirror_debug > 0)					\
81 			printf("[%u]", lvl);				\
82 		printf(": ");						\
83 		printf(__VA_ARGS__);					\
84 		printf("\n");						\
85 	}								\
86 } while (0)
87 #define	G_MIRROR_LOGREQ(lvl, bp, ...)	do {				\
88 	if (g_mirror_debug >= (lvl)) {					\
89 		printf("GEOM_MIRROR");					\
90 		if (g_mirror_debug > 0)					\
91 			printf("[%u]", lvl);				\
92 		printf(": ");						\
93 		printf(__VA_ARGS__);					\
94 		printf(" ");						\
95 		g_print_bio(bp);					\
96 		printf("\n");						\
97 	}								\
98 } while (0)
99 
100 #define	G_MIRROR_BIO_FLAG_REGULAR	0x01
101 #define	G_MIRROR_BIO_FLAG_SYNC		0x02
102 
103 /*
104  * Informations needed for synchronization.
105  */
106 struct g_mirror_disk_sync {
107 	struct g_consumer *ds_consumer;	/* Consumer connected to our mirror. */
108 	off_t		  ds_offset;	/* Offset of next request to send. */
109 	off_t		  ds_offset_done; /* Offset of already synchronized
110 					   region. */
111 	u_int		  ds_syncid;	/* Disk's synchronization ID. */
112 	u_int		  ds_inflight;	/* Number of in-flight sync requests. */
113 	struct bio	**ds_bios;	/* BIOs for synchronization I/O. */
114 };
115 
116 /*
117  * Informations needed for synchronization.
118  */
119 struct g_mirror_device_sync {
120 	struct g_geom	*ds_geom;	/* Synchronization geom. */
121 	u_int		 ds_ndisks;	/* Number of disks in SYNCHRONIZING
122 					   state. */
123 };
124 
125 #define	G_MIRROR_DISK_STATE_NONE		0
126 #define	G_MIRROR_DISK_STATE_NEW			1
127 #define	G_MIRROR_DISK_STATE_ACTIVE		2
128 #define	G_MIRROR_DISK_STATE_STALE		3
129 #define	G_MIRROR_DISK_STATE_SYNCHRONIZING	4
130 #define	G_MIRROR_DISK_STATE_DISCONNECTED	5
131 #define	G_MIRROR_DISK_STATE_DESTROY		6
132 struct g_mirror_disk {
133 	uint32_t	 d_id;		/* Disk ID. */
134 	struct g_consumer *d_consumer;	/* Consumer. */
135 	struct g_mirror_softc	*d_softc; /* Back-pointer to softc. */
136 	int		 d_state;	/* Disk state. */
137 	u_int		 d_priority;	/* Disk priority. */
138 	u_int		 load;		/* Averaged queue length */
139 	off_t		 d_last_offset;	/* Last read offset */
140 	uint64_t	 d_flags;	/* Additional flags. */
141 	u_int		 d_genid;	/* Disk's generation ID. */
142 	struct g_mirror_disk_sync d_sync;/* Sync information. */
143 	LIST_ENTRY(g_mirror_disk) d_next;
144 };
145 #define	d_name	d_consumer->provider->name
146 
147 #define	G_MIRROR_EVENT_DONTWAIT	0x1
148 #define	G_MIRROR_EVENT_WAIT	0x2
149 #define	G_MIRROR_EVENT_DEVICE	0x4
150 #define	G_MIRROR_EVENT_DONE	0x8
151 struct g_mirror_event {
152 	struct g_mirror_disk	*e_disk;
153 	int			 e_state;
154 	int			 e_flags;
155 	int			 e_error;
156 	TAILQ_ENTRY(g_mirror_event) e_next;
157 };
158 
159 #define	G_MIRROR_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
160 #define	G_MIRROR_DEVICE_FLAG_WAIT	0x0200000000000000ULL
161 #define	G_MIRROR_DEVICE_FLAG_DESTROYING	0x0400000000000000ULL
162 #define	G_MIRROR_DEVICE_FLAG_TASTING	0x0800000000000000ULL
163 #define	G_MIRROR_DEVICE_FLAG_WIPE	0x1000000000000000ULL
164 
165 #define	G_MIRROR_DEVICE_STATE_STARTING		0
166 #define	G_MIRROR_DEVICE_STATE_RUNNING		1
167 
168 /* Bump syncid on first write. */
169 #define	G_MIRROR_BUMP_SYNCID	0x1
170 /* Bump genid immediately. */
171 #define	G_MIRROR_BUMP_GENID	0x2
172 struct g_mirror_softc {
173 	u_int		sc_state;	/* Device state. */
174 	uint32_t	sc_slice;	/* Slice size. */
175 	uint8_t		sc_balance;	/* Balance algorithm. */
176 	uint64_t	sc_mediasize;	/* Device size. */
177 	uint32_t	sc_sectorsize;	/* Sector size. */
178 	uint64_t	sc_flags;	/* Additional flags. */
179 
180 	struct g_geom	*sc_geom;
181 	struct g_provider *sc_provider;
182 	int		sc_provider_open;
183 
184 	uint32_t	sc_id;		/* Mirror unique ID. */
185 
186 	struct sx	 sc_lock;
187 	struct bio_queue_head sc_queue;
188 	struct mtx	 sc_queue_mtx;
189 	struct proc	*sc_worker;
190 	struct bio_queue_head sc_regular_delayed; /* Delayed I/O requests due
191 						     collision with sync
192 						     requests. */
193 	struct bio_queue_head sc_inflight; /* In-flight regular write
194 					      requests. */
195 	struct bio_queue_head sc_sync_delayed; /* Delayed sync requests due
196 						  collision with regular
197 						  requests. */
198 
199 	LIST_HEAD(, g_mirror_disk) sc_disks;
200 	u_int		sc_ndisks;	/* Number of disks. */
201 	struct g_mirror_disk *sc_hint;
202 
203 	u_int		sc_genid;	/* Generation ID. */
204 	u_int		sc_syncid;	/* Synchronization ID. */
205 	int		sc_bump_id;
206 	struct g_mirror_device_sync sc_sync;
207 	int		sc_idle;	/* DIRTY flags removed. */
208 	time_t		sc_last_write;
209 	u_int		sc_writes;
210 
211 	TAILQ_HEAD(, g_mirror_event) sc_events;
212 	struct mtx	sc_events_mtx;
213 
214 	struct callout	sc_callout;
215 
216 	struct root_hold_token *sc_rootmount;
217 
218 	struct mtx	 sc_done_mtx;
219 };
220 #define	sc_name	sc_geom->name
221 
222 u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
223 #define	G_MIRROR_DESTROY_SOFT		0
224 #define	G_MIRROR_DESTROY_DELAYED	1
225 #define	G_MIRROR_DESTROY_HARD		2
226 int g_mirror_destroy(struct g_mirror_softc *sc, int how);
227 int g_mirror_event_send(void *arg, int state, int flags);
228 struct g_mirror_metadata;
229 int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
230     struct g_mirror_metadata *md);
231 int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
232 void g_mirror_fill_metadata(struct g_mirror_softc *sc,
233     struct g_mirror_disk *disk, struct g_mirror_metadata *md);
234 void g_mirror_update_metadata(struct g_mirror_disk *disk);
235 
236 g_ctl_req_t g_mirror_config;
237 #endif	/* _KERNEL */
238 
239 struct g_mirror_metadata {
240 	char		md_magic[16];	/* Magic value. */
241 	uint32_t	md_version;	/* Version number. */
242 	char		md_name[16];	/* Mirror name. */
243 	uint32_t	md_mid;		/* Mirror unique ID. */
244 	uint32_t	md_did;		/* Disk unique ID. */
245 	uint8_t		md_all;		/* Number of disks in mirror. */
246 	uint32_t	md_genid;	/* Generation ID. */
247 	uint32_t	md_syncid;	/* Synchronization ID. */
248 	uint8_t		md_priority;	/* Disk priority. */
249 	uint32_t	md_slice;	/* Slice size. */
250 	uint8_t		md_balance;	/* Balance type. */
251 	uint64_t	md_mediasize;	/* Size of the smallest
252 					   disk in mirror. */
253 	uint32_t	md_sectorsize;	/* Sector size. */
254 	uint64_t	md_sync_offset;	/* Synchronized offset. */
255 	uint64_t	md_mflags;	/* Additional mirror flags. */
256 	uint64_t	md_dflags;	/* Additional disk flags. */
257 	char		md_provider[16]; /* Hardcoded provider. */
258 	uint64_t	md_provsize;	/* Provider's size. */
259 	u_char		md_hash[16];	/* MD5 hash. */
260 };
261 static __inline void
262 mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
263 {
264 	MD5_CTX ctx;
265 
266 	bcopy(md->md_magic, data, 16);
267 	le32enc(data + 16, md->md_version);
268 	bcopy(md->md_name, data + 20, 16);
269 	le32enc(data + 36, md->md_mid);
270 	le32enc(data + 40, md->md_did);
271 	*(data + 44) = md->md_all;
272 	le32enc(data + 45, md->md_genid);
273 	le32enc(data + 49, md->md_syncid);
274 	*(data + 53) = md->md_priority;
275 	le32enc(data + 54, md->md_slice);
276 	*(data + 58) = md->md_balance;
277 	le64enc(data + 59, md->md_mediasize);
278 	le32enc(data + 67, md->md_sectorsize);
279 	le64enc(data + 71, md->md_sync_offset);
280 	le64enc(data + 79, md->md_mflags);
281 	le64enc(data + 87, md->md_dflags);
282 	bcopy(md->md_provider, data + 95, 16);
283 	le64enc(data + 111, md->md_provsize);
284 	MD5Init(&ctx);
285 	MD5Update(&ctx, data, 119);
286 	MD5Final(md->md_hash, &ctx);
287 	bcopy(md->md_hash, data + 119, 16);
288 }
289 static __inline int
290 mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
291 {
292 	MD5_CTX ctx;
293 
294 	bcopy(data + 20, md->md_name, 16);
295 	md->md_mid = le32dec(data + 36);
296 	md->md_did = le32dec(data + 40);
297 	md->md_all = *(data + 44);
298 	md->md_syncid = le32dec(data + 45);
299 	md->md_priority = *(data + 49);
300 	md->md_slice = le32dec(data + 50);
301 	md->md_balance = *(data + 54);
302 	md->md_mediasize = le64dec(data + 55);
303 	md->md_sectorsize = le32dec(data + 63);
304 	md->md_sync_offset = le64dec(data + 67);
305 	md->md_mflags = le64dec(data + 75);
306 	md->md_dflags = le64dec(data + 83);
307 	bcopy(data + 91, md->md_provider, 16);
308 	bcopy(data + 107, md->md_hash, 16);
309 	MD5Init(&ctx);
310 	MD5Update(&ctx, data, 107);
311 	MD5Final(md->md_hash, &ctx);
312 	if (bcmp(md->md_hash, data + 107, 16) != 0)
313 		return (EINVAL);
314 
315 	/* New fields. */
316 	md->md_genid = 0;
317 	md->md_provsize = 0;
318 
319 	return (0);
320 }
321 static __inline int
322 mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
323 {
324 	MD5_CTX ctx;
325 
326 	bcopy(data + 20, md->md_name, 16);
327 	md->md_mid = le32dec(data + 36);
328 	md->md_did = le32dec(data + 40);
329 	md->md_all = *(data + 44);
330 	md->md_genid = le32dec(data + 45);
331 	md->md_syncid = le32dec(data + 49);
332 	md->md_priority = *(data + 53);
333 	md->md_slice = le32dec(data + 54);
334 	md->md_balance = *(data + 58);
335 	md->md_mediasize = le64dec(data + 59);
336 	md->md_sectorsize = le32dec(data + 67);
337 	md->md_sync_offset = le64dec(data + 71);
338 	md->md_mflags = le64dec(data + 79);
339 	md->md_dflags = le64dec(data + 87);
340 	bcopy(data + 95, md->md_provider, 16);
341 	bcopy(data + 111, md->md_hash, 16);
342 	MD5Init(&ctx);
343 	MD5Update(&ctx, data, 111);
344 	MD5Final(md->md_hash, &ctx);
345 	if (bcmp(md->md_hash, data + 111, 16) != 0)
346 		return (EINVAL);
347 
348 	/* New fields. */
349 	md->md_provsize = 0;
350 
351 	return (0);
352 }
353 static __inline int
354 mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
355 {
356 	MD5_CTX ctx;
357 
358 	bcopy(data + 20, md->md_name, 16);
359 	md->md_mid = le32dec(data + 36);
360 	md->md_did = le32dec(data + 40);
361 	md->md_all = *(data + 44);
362 	md->md_genid = le32dec(data + 45);
363 	md->md_syncid = le32dec(data + 49);
364 	md->md_priority = *(data + 53);
365 	md->md_slice = le32dec(data + 54);
366 	md->md_balance = *(data + 58);
367 	md->md_mediasize = le64dec(data + 59);
368 	md->md_sectorsize = le32dec(data + 67);
369 	md->md_sync_offset = le64dec(data + 71);
370 	md->md_mflags = le64dec(data + 79);
371 	md->md_dflags = le64dec(data + 87);
372 	bcopy(data + 95, md->md_provider, 16);
373 	md->md_provsize = le64dec(data + 111);
374 	bcopy(data + 119, md->md_hash, 16);
375 	MD5Init(&ctx);
376 	MD5Update(&ctx, data, 119);
377 	MD5Final(md->md_hash, &ctx);
378 	if (bcmp(md->md_hash, data + 119, 16) != 0)
379 		return (EINVAL);
380 	return (0);
381 }
382 static __inline int
383 mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
384 {
385 	int error;
386 
387 	bcopy(data, md->md_magic, 16);
388 	md->md_version = le32dec(data + 16);
389 	switch (md->md_version) {
390 	case 0:
391 	case 1:
392 		error = mirror_metadata_decode_v0v1(data, md);
393 		break;
394 	case 2:
395 		error = mirror_metadata_decode_v2(data, md);
396 		break;
397 	case 3:
398 	case 4:
399 		error = mirror_metadata_decode_v3v4(data, md);
400 		break;
401 	default:
402 		error = EINVAL;
403 		break;
404 	}
405 	return (error);
406 }
407 
408 static __inline const char *
409 balance_name(u_int balance)
410 {
411 	static const char *algorithms[] = {
412 		[G_MIRROR_BALANCE_NONE] = "none",
413 		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
414 		[G_MIRROR_BALANCE_LOAD] = "load",
415 		[G_MIRROR_BALANCE_SPLIT] = "split",
416 		[G_MIRROR_BALANCE_PREFER] = "prefer",
417 		[G_MIRROR_BALANCE_MAX + 1] = "unknown"
418 	};
419 
420 	if (balance > G_MIRROR_BALANCE_MAX)
421 		balance = G_MIRROR_BALANCE_MAX + 1;
422 
423 	return (algorithms[balance]);
424 }
425 
426 static __inline int
427 balance_id(const char *name)
428 {
429 	static const char *algorithms[] = {
430 		[G_MIRROR_BALANCE_NONE] = "none",
431 		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
432 		[G_MIRROR_BALANCE_LOAD] = "load",
433 		[G_MIRROR_BALANCE_SPLIT] = "split",
434 		[G_MIRROR_BALANCE_PREFER] = "prefer"
435 	};
436 	int n;
437 
438 	for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
439 		if (strcmp(name, algorithms[n]) == 0)
440 			return (n);
441 	}
442 	return (-1);
443 }
444 
445 static __inline void
446 mirror_metadata_dump(const struct g_mirror_metadata *md)
447 {
448 	static const char hex[] = "0123456789abcdef";
449 	char hash[16 * 2 + 1];
450 	u_int i;
451 
452 	printf("     magic: %s\n", md->md_magic);
453 	printf("   version: %u\n", (u_int)md->md_version);
454 	printf("      name: %s\n", md->md_name);
455 	printf("       mid: %u\n", (u_int)md->md_mid);
456 	printf("       did: %u\n", (u_int)md->md_did);
457 	printf("       all: %u\n", (u_int)md->md_all);
458 	printf("     genid: %u\n", (u_int)md->md_genid);
459 	printf("    syncid: %u\n", (u_int)md->md_syncid);
460 	printf("  priority: %u\n", (u_int)md->md_priority);
461 	printf("     slice: %u\n", (u_int)md->md_slice);
462 	printf("   balance: %s\n", balance_name((u_int)md->md_balance));
463 	printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
464 	printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
465 	printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
466 	printf("    mflags:");
467 	if (md->md_mflags == 0)
468 		printf(" NONE");
469 	else {
470 		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
471 			printf(" NOFAILSYNC");
472 		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
473 			printf(" NOAUTOSYNC");
474 	}
475 	printf("\n");
476 	printf("    dflags:");
477 	if (md->md_dflags == 0)
478 		printf(" NONE");
479 	else {
480 		if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
481 			printf(" DIRTY");
482 		if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
483 			printf(" SYNCHRONIZING");
484 		if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
485 			printf(" FORCE_SYNC");
486 		if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
487 			printf(" INACTIVE");
488 	}
489 	printf("\n");
490 	printf("hcprovider: %s\n", md->md_provider);
491 	printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
492 	bzero(hash, sizeof(hash));
493 	for (i = 0; i < 16; i++) {
494 		hash[i * 2] = hex[md->md_hash[i] >> 4];
495 		hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
496 	}
497 	printf("  MD5 hash: %s\n", hash);
498 }
499 #endif	/* !_G_MIRROR_H_ */
500