/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_MIRROR_H_
#define	_G_MIRROR_H_

#include <sys/endian.h>
#include <sys/md5.h>

#define	G_MIRROR_CLASS_NAME	"MIRROR"

#define	G_MIRROR_MAGIC		"GEOM::MIRROR"
/*
 * Version history:
 * 0 - Initial version number.
 * 1 - Added 'prefer' balance algorithm.
 * 2 - Added md_genid field to metadata.
 * 3 - Added md_provsize field to metadata.
 * 4 - Added 'no failure synchronization' flag.
 */
#define	G_MIRROR_VERSION	4

#define	G_MIRROR_BALANCE_NONE		0
#define	G_MIRROR_BALANCE_ROUND_ROBIN	1
#define	G_MIRROR_BALANCE_LOAD		2
#define	G_MIRROR_BALANCE_SPLIT		3
#define	G_MIRROR_BALANCE_PREFER		4
#define	G_MIRROR_BALANCE_MIN		G_MIRROR_BALANCE_NONE
#define	G_MIRROR_BALANCE_MAX		G_MIRROR_BALANCE_PREFER

#define	G_MIRROR_DISK_FLAG_DIRTY		0x0000000000000001ULL
#define	G_MIRROR_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
#define	G_MIRROR_DISK_FLAG_FORCE_SYNC		0x0000000000000004ULL
#define	G_MIRROR_DISK_FLAG_INACTIVE		0x0000000000000008ULL
#define	G_MIRROR_DISK_FLAG_HARDCODED		0x0000000000000010ULL
#define	G_MIRROR_DISK_FLAG_BROKEN		0x0000000000000020ULL
#define	G_MIRROR_DISK_FLAG_CANDELETE		0x0000000000000040ULL
#define	G_MIRROR_DISK_FLAG_MASK		(G_MIRROR_DISK_FLAG_DIRTY |	\
					 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
					 G_MIRROR_DISK_FLAG_FORCE_SYNC | \
					 G_MIRROR_DISK_FLAG_INACTIVE | \
					 G_MIRROR_DISK_FLAG_CANDELETE)

#define	G_MIRROR_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_MIRROR_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL
#define	G_MIRROR_DEVICE_FLAG_MASK	(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
					 G_MIRROR_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
extern u_int g_mirror_debug;

#define	G_MIRROR_DEBUG(lvl, ...)	do {				\
	if (g_mirror_debug >= (lvl)) {					\
		printf("GEOM_MIRROR");					\
		if (g_mirror_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf("\n");						\
	}								\
} while (0)
#define	G_MIRROR_LOGREQ(lvl, bp, ...)	do {				\
	if (g_mirror_debug >= (lvl)) {					\
		printf("GEOM_MIRROR");					\
		if (g_mirror_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf(" ");						\
		g_print_bio(bp);					\
		printf("\n");						\
	}								\
} while (0)
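/*
 * Illustrative use of the macros above; the levels and messages here are
 * examples rather than quotes from g_mirror.c, and "sc" and "bp" stand
 * for a softc pointer and a struct bio pointer respectively:
 *
 *	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
 *	G_MIRROR_LOGREQ(3, bp, "Request received.");
 */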

#define	G_MIRROR_BIO_FLAG_REGULAR	0x01
#define	G_MIRROR_BIO_FLAG_SYNC		0x02

/*
 * Per-disk synchronization information.
 */
struct g_mirror_disk_sync {
	struct g_consumer *ds_consumer;	/* Consumer connected to our mirror. */
	off_t		  ds_offset;	/* Offset of next request to send. */
	off_t		  ds_offset_done; /* Offset of already synchronized
					   region. */
	u_int		  ds_syncid;	/* Disk's synchronization ID. */
	u_int		  ds_inflight;	/* Number of in-flight sync requests. */
	struct bio	**ds_bios;	/* BIOs for synchronization I/O. */
};

/*
 * Per-device synchronization information.
 */
struct g_mirror_device_sync {
	struct g_geom	*ds_geom;	/* Synchronization geom. */
	u_int		 ds_ndisks;	/* Number of disks in SYNCHRONIZING
					   state. */
};

#define	G_MIRROR_DISK_STATE_NONE		0
#define	G_MIRROR_DISK_STATE_NEW			1
#define	G_MIRROR_DISK_STATE_ACTIVE		2
#define	G_MIRROR_DISK_STATE_STALE		3
#define	G_MIRROR_DISK_STATE_SYNCHRONIZING	4
#define	G_MIRROR_DISK_STATE_DISCONNECTED	5
#define	G_MIRROR_DISK_STATE_DESTROY		6
struct g_mirror_disk {
	uint32_t	 d_id;		/* Disk ID. */
	struct g_consumer *d_consumer;	/* Consumer. */
	struct g_mirror_softc	*d_softc; /* Back-pointer to softc. */
	int		 d_state;	/* Disk state. */
	u_int		 d_priority;	/* Disk priority. */
	u_int		 load;		/* Averaged queue length. */
	off_t		 d_last_offset;	/* Last read offset. */
	uint64_t	 d_flags;	/* Additional flags. */
	u_int		 d_genid;	/* Disk's generation ID. */
	struct g_mirror_disk_sync d_sync;/* Sync information. */
	LIST_ENTRY(g_mirror_disk) d_next;
};
#define	d_name	d_consumer->provider->name

#define	G_MIRROR_EVENT_DONTWAIT	0x1
#define	G_MIRROR_EVENT_WAIT	0x2
#define	G_MIRROR_EVENT_DEVICE	0x4
#define	G_MIRROR_EVENT_DONE	0x8
struct g_mirror_event {
	struct g_mirror_disk	*e_disk;
	int			 e_state;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_mirror_event) e_next;
};
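/*
 * Events are queued on the softc (sc_events below) and serviced by the
 * worker thread.  An illustrative call via g_mirror_event_send(),
 * declared later in this header, assuming a device-wide state change is
 * being requested ("sc" stands for a softc pointer; the actual call
 * sites live in g_mirror.c):
 *
 *	g_mirror_event_send(sc, G_MIRROR_DEVICE_STATE_RUNNING,
 *	    G_MIRROR_EVENT_DEVICE | G_MIRROR_EVENT_DONTWAIT);
 */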

#define	G_MIRROR_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_WAIT	0x0200000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_DESTROYING	0x0400000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_TASTING	0x0800000000000000ULL

#define	G_MIRROR_DEVICE_STATE_STARTING		0
#define	G_MIRROR_DEVICE_STATE_RUNNING		1

/* Bump syncid on first write. */
#define	G_MIRROR_BUMP_SYNCID	0x1
/* Bump genid immediately. */
#define	G_MIRROR_BUMP_GENID	0x2
struct g_mirror_softc {
	u_int		sc_state;	/* Device state. */
	uint32_t	sc_slice;	/* Slice size. */
	uint8_t		sc_balance;	/* Balance algorithm. */
	uint64_t	sc_mediasize;	/* Device size. */
	uint32_t	sc_sectorsize;	/* Sector size. */
	uint64_t	sc_flags;	/* Additional flags. */

	struct g_geom	*sc_geom;
	struct g_provider *sc_provider;

	uint32_t	sc_id;		/* Mirror unique ID. */

	struct sx	 sc_lock;
	struct bio_queue_head sc_queue;
	struct mtx	 sc_queue_mtx;
	struct proc	*sc_worker;
	struct bio_queue_head sc_regular_delayed; /* Regular requests delayed
						     due to collision with
						     sync requests. */
	struct bio_queue_head sc_inflight; /* In-flight regular write
					      requests. */
	struct bio_queue_head sc_sync_delayed; /* Sync requests delayed due
						  to collision with regular
						  requests. */

	LIST_HEAD(, g_mirror_disk) sc_disks;
	u_int		sc_ndisks;	/* Number of disks. */
	struct g_mirror_disk *sc_hint;

	u_int		sc_genid;	/* Generation ID. */
	u_int		sc_syncid;	/* Synchronization ID. */
	int		sc_bump_id;
	struct g_mirror_device_sync sc_sync;
	int		sc_idle;	/* DIRTY flags removed. */
	time_t		sc_last_write;
	u_int		sc_writes;

	TAILQ_HEAD(, g_mirror_event) sc_events;
	struct mtx	sc_events_mtx;

	struct callout	sc_callout;

	struct root_hold_token *sc_rootmount;

	struct mtx	 sc_done_mtx;
};
#define	sc_name	sc_geom->name

u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
#define	G_MIRROR_DESTROY_SOFT		0
#define	G_MIRROR_DESTROY_DELAYED	1
#define	G_MIRROR_DESTROY_HARD		2
int g_mirror_destroy(struct g_mirror_softc *sc, int how);
int g_mirror_event_send(void *arg, int state, int flags);
struct g_mirror_metadata;
int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md);
int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
void g_mirror_fill_metadata(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct g_mirror_metadata *md);
void g_mirror_update_metadata(struct g_mirror_disk *disk);

g_ctl_req_t g_mirror_config;
#endif	/* _KERNEL */

struct g_mirror_metadata {
	char		md_magic[16];	/* Magic value. */
	uint32_t	md_version;	/* Version number. */
	char		md_name[16];	/* Mirror name. */
	uint32_t	md_mid;		/* Mirror unique ID. */
	uint32_t	md_did;		/* Disk unique ID. */
	uint8_t		md_all;		/* Number of disks in mirror. */
	uint32_t	md_genid;	/* Generation ID. */
	uint32_t	md_syncid;	/* Synchronization ID. */
	uint8_t		md_priority;	/* Disk priority. */
	uint32_t	md_slice;	/* Slice size. */
	uint8_t		md_balance;	/* Balance type. */
	uint64_t	md_mediasize;	/* Size of the smallest
					   disk in mirror. */
	uint32_t	md_sectorsize;	/* Sector size. */
	uint64_t	md_sync_offset;	/* Synchronized offset. */
	uint64_t	md_mflags;	/* Additional mirror flags. */
	uint64_t	md_dflags;	/* Additional disk flags. */
	char		md_provider[16]; /* Hardcoded provider. */
	uint64_t	md_provsize;	/* Provider's size. */
	u_char		md_hash[16];	/* MD5 hash. */
};
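/*
 * On-disk layout of the current metadata format, as written by
 * mirror_metadata_encode() below.  All multi-byte fields are
 * little-endian; the byte offsets and sizes follow directly from the
 * encoder:
 *
 *	  0	md_magic	(16 bytes)
 *	 16	md_version	(4)
 *	 20	md_name		(16)
 *	 36	md_mid		(4)
 *	 40	md_did		(4)
 *	 44	md_all		(1)
 *	 45	md_genid	(4)
 *	 49	md_syncid	(4)
 *	 53	md_priority	(1)
 *	 54	md_slice	(4)
 *	 58	md_balance	(1)
 *	 59	md_mediasize	(8)
 *	 67	md_sectorsize	(4)
 *	 71	md_sync_offset	(8)
 *	 79	md_mflags	(8)
 *	 87	md_dflags	(8)
 *	 95	md_provider	(16)
 *	111	md_provsize	(8)
 *	119	md_hash		(16, MD5 over bytes 0-118)
 */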
static __inline void
mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
{
	MD5_CTX ctx;

	bcopy(md->md_magic, data, 16);
	le32enc(data + 16, md->md_version);
	bcopy(md->md_name, data + 20, 16);
	le32enc(data + 36, md->md_mid);
	le32enc(data + 40, md->md_did);
	*(data + 44) = md->md_all;
	le32enc(data + 45, md->md_genid);
	le32enc(data + 49, md->md_syncid);
	*(data + 53) = md->md_priority;
	le32enc(data + 54, md->md_slice);
	*(data + 58) = md->md_balance;
	le64enc(data + 59, md->md_mediasize);
	le32enc(data + 67, md->md_sectorsize);
	le64enc(data + 71, md->md_sync_offset);
	le64enc(data + 79, md->md_mflags);
	le64enc(data + 87, md->md_dflags);
	bcopy(md->md_provider, data + 95, 16);
	le64enc(data + 111, md->md_provsize);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	bcopy(md->md_hash, data + 119, 16);
}
static __inline int
mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_syncid = le32dec(data + 45);
	md->md_priority = *(data + 49);
	md->md_slice = le32dec(data + 50);
	md->md_balance = *(data + 54);
	md->md_mediasize = le64dec(data + 55);
	md->md_sectorsize = le32dec(data + 63);
	md->md_sync_offset = le64dec(data + 67);
	md->md_mflags = le64dec(data + 75);
	md->md_dflags = le64dec(data + 83);
	bcopy(data + 91, md->md_provider, 16);
	bcopy(data + 107, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 107);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 107, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_genid = 0;
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	bcopy(data + 111, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 111);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 111, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	md->md_provsize = le64dec(data + 111);
	bcopy(data + 119, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 119, 16) != 0)
		return (EINVAL);
	return (0);
}
static __inline int
mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
{
	int error;

	bcopy(data, md->md_magic, 16);
	md->md_version = le32dec(data + 16);
	switch (md->md_version) {
	case 0:
	case 1:
		error = mirror_metadata_decode_v0v1(data, md);
		break;
	case 2:
		error = mirror_metadata_decode_v2(data, md);
		break;
	case 3:
	case 4:
		error = mirror_metadata_decode_v3v4(data, md);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
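/*
 * Sketch of the expected in-kernel decode path (error handling trimmed;
 * the real sequence lives in g_mirror_read_metadata() in g_mirror.c):
 * the metadata block occupies the provider's last sector, so read that
 * sector through the consumer and decode it.  Here "cp" stands for an
 * open struct g_consumer:
 *
 *	u_char *buf;
 *	struct g_mirror_metadata md;
 *	int error;
 *
 *	buf = g_read_data(cp, cp->provider->mediasize -
 *	    cp->provider->sectorsize, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	error = mirror_metadata_decode(buf, &md);
 *	g_free(buf);
 *	if (error == 0 && strcmp(md.md_magic, G_MIRROR_MAGIC) == 0)
 *		... metadata is valid for this class ...
 */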

static __inline const char *
balance_name(u_int balance)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer",
		[G_MIRROR_BALANCE_MAX + 1] = "unknown"
	};

	if (balance > G_MIRROR_BALANCE_MAX)
		balance = G_MIRROR_BALANCE_MAX + 1;

	return (algorithms[balance]);
}

static __inline int
balance_id(const char *name)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer"
	};
	int n;

	for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
		if (strcmp(name, algorithms[n]) == 0)
			return (n);
	}
	return (-1);
}
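/*
 * balance_name() and balance_id() are inverses for valid input:
 * balance_id("load") returns G_MIRROR_BALANCE_LOAD and
 * balance_name(G_MIRROR_BALANCE_LOAD) returns "load", while an
 * unrecognized name yields -1 and an out-of-range id yields "unknown".
 */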

static __inline void
mirror_metadata_dump(const struct g_mirror_metadata *md)
{
	static const char hex[] = "0123456789abcdef";
	char hash[16 * 2 + 1];
	u_int i;

	printf("     magic: %s\n", md->md_magic);
	printf("   version: %u\n", (u_int)md->md_version);
	printf("      name: %s\n", md->md_name);
	printf("       mid: %u\n", (u_int)md->md_mid);
	printf("       did: %u\n", (u_int)md->md_did);
	printf("       all: %u\n", (u_int)md->md_all);
	printf("     genid: %u\n", (u_int)md->md_genid);
	printf("    syncid: %u\n", (u_int)md->md_syncid);
	printf("  priority: %u\n", (u_int)md->md_priority);
	printf("     slice: %u\n", (u_int)md->md_slice);
	printf("   balance: %s\n", balance_name((u_int)md->md_balance));
	printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
	printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
	printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
	printf("    mflags:");
	if (md->md_mflags == 0)
		printf(" NONE");
	else {
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
			printf(" NOFAILSYNC");
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
			printf(" NOAUTOSYNC");
	}
	printf("\n");
	printf("    dflags:");
	if (md->md_dflags == 0)
		printf(" NONE");
	else {
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
			printf(" DIRTY");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
			printf(" SYNCHRONIZING");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
			printf(" FORCE_SYNC");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
			printf(" INACTIVE");
	}
	printf("\n");
	printf("hcprovider: %s\n", md->md_provider);
	printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
	bzero(hash, sizeof(hash));
	for (i = 0; i < 16; i++) {
		hash[i * 2] = hex[md->md_hash[i] >> 4];
		hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
	}
	printf("  MD5 hash: %s\n", hash);
}
#endif	/* !_G_MIRROR_H_ */