xref: /freebsd/sys/geom/journal/g_journal.c (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/bio.h>
40 #include <sys/sysctl.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/eventhandler.h>
44 #include <sys/proc.h>
45 #include <sys/kthread.h>
46 #include <sys/sched.h>
47 #include <sys/taskqueue.h>
48 #include <sys/vnode.h>
49 #include <sys/sbuf.h>
50 #ifdef GJ_MEMDEBUG
51 #include <sys/stack.h>
52 #include <sys/kdb.h>
53 #endif
54 #include <vm/vm.h>
55 #include <vm/vm_kern.h>
56 #include <geom/geom.h>
57 #include <geom/geom_dbg.h>
58 
59 #include <geom/journal/g_journal.h>
60 
61 FEATURE(geom_journal, "GEOM journaling support");
62 
63 /*
64  * On-disk journal format:
65  *
66  * JH - Journal header
67  * RH - Record header
68  *
69  * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
70  * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
71  * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
72  *
73  */
74 
75 CTASSERT(sizeof(struct g_journal_header) <= 512);
76 CTASSERT(sizeof(struct g_journal_record_header) <= 512);
77 
78 static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
79 static struct mtx g_journal_cache_mtx;
80 MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
81 
82 const struct g_journal_desc *g_journal_filesystems[] = {
83 	&g_journal_ufs,
84 	NULL
85 };
86 
87 SYSCTL_DECL(_kern_geom);
88 
89 int g_journal_debug = 0;
90 static u_int g_journal_switch_time = 10;
91 static u_int g_journal_force_switch = 70;
92 static u_int g_journal_parallel_flushes = 16;
93 static u_int g_journal_parallel_copies = 16;
94 static u_int g_journal_accept_immediately = 64;
95 static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
96 static u_int g_journal_do_optimize = 1;
97 
98 static SYSCTL_NODE(_kern_geom, OID_AUTO, journal,
99     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
100     "GEOM_JOURNAL stuff");
101 SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RWTUN, &g_journal_debug, 0,
102     "Debug level");
103 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
104     &g_journal_switch_time, 0, "Switch journals every N seconds");
105 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
106     &g_journal_force_switch, 0, "Force switch when journal is N% full");
107 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
108     &g_journal_parallel_flushes, 0,
109     "Number of flush I/O requests to send in parallel");
110 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
111     &g_journal_accept_immediately, 0,
112     "Number of I/O requests accepted immediately");
113 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
114     &g_journal_parallel_copies, 0,
115     "Number of copy I/O requests to send in parallel");
116 static int
117 g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
118 {
119 	u_int entries;
120 	int error;
121 
122 	entries = g_journal_record_entries;
123 	error = sysctl_handle_int(oidp, &entries, 0, req);
124 	if (error != 0 || req->newptr == NULL)
125 		return (error);
126 	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
127 		return (EINVAL);
128 	g_journal_record_entries = entries;
129 	return (0);
130 }
131 SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
132     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
133     g_journal_record_entries_sysctl, "I",
134     "Maximum number of entries in one journal record");
135 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
136     &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
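/*
 * Example (illustrative values, not recommendations): the knobs above can be
 * tuned at run time with sysctl(8), e.g.:
 *   sysctl kern.geom.journal.switch_time=5
 *   sysctl kern.geom.journal.force_switch=80
 */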
137 
138 static u_long g_journal_cache_used = 0;
139 static u_long g_journal_cache_limit = 64 * 1024 * 1024;
140 static u_int g_journal_cache_divisor = 2;
141 static u_int g_journal_cache_switch = 90;
142 static u_int g_journal_cache_misses = 0;
143 static u_int g_journal_cache_alloc_failures = 0;
144 static u_long g_journal_cache_low = 0;
145 
146 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache,
147     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
148     "GEOM_JOURNAL cache");
149 SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
150     &g_journal_cache_used, 0, "Number of allocated bytes");
151 static int
152 g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
153 {
154 	u_long limit;
155 	int error;
156 
157 	limit = g_journal_cache_limit;
158 	error = sysctl_handle_long(oidp, &limit, 0, req);
159 	if (error != 0 || req->newptr == NULL)
160 		return (error);
161 	g_journal_cache_limit = limit;
162 	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
163 	return (0);
164 }
165 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
166     CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, NULL, 0,
167     g_journal_cache_limit_sysctl, "LU",
168     "Maximum number of allocated bytes");
169 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
170     &g_journal_cache_divisor, 0,
171     "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
172 static int
173 g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
174 {
175 	u_int cswitch;
176 	int error;
177 
178 	cswitch = g_journal_cache_switch;
179 	error = sysctl_handle_int(oidp, &cswitch, 0, req);
180 	if (error != 0 || req->newptr == NULL)
181 		return (error);
182 	if (cswitch > 100)
183 		return (EINVAL);
184 	g_journal_cache_switch = cswitch;
185 	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
186 	return (0);
187 }
188 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
189     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
190     g_journal_cache_switch_sysctl, "I",
191     "Force switch when we hit this percent of cache use");
192 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
193     &g_journal_cache_misses, 0, "Number of cache misses");
194 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
195     &g_journal_cache_alloc_failures, 0, "Memory allocation failures");
196 
197 static u_long g_journal_stats_bytes_skipped = 0;
198 static u_long g_journal_stats_combined_ios = 0;
199 static u_long g_journal_stats_switches = 0;
200 static u_long g_journal_stats_wait_for_copy = 0;
201 static u_long g_journal_stats_journal_full = 0;
202 static u_long g_journal_stats_low_mem = 0;
203 
204 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats,
205     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
206     "GEOM_JOURNAL statistics");
207 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
208     &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
209 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
210     &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
211 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
212     &g_journal_stats_switches, 0, "Number of journal switches");
213 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
214     &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
215 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
216     &g_journal_stats_journal_full, 0,
217     "Number of times journal was almost full.");
218 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
219     &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
220 
221 static g_taste_t g_journal_taste;
222 static g_ctl_req_t g_journal_config;
223 static g_dumpconf_t g_journal_dumpconf;
224 static g_init_t g_journal_init;
225 static g_fini_t g_journal_fini;
226 
227 struct g_class g_journal_class = {
228 	.name = G_JOURNAL_CLASS_NAME,
229 	.version = G_VERSION,
230 	.taste = g_journal_taste,
231 	.ctlreq = g_journal_config,
232 	.dumpconf = g_journal_dumpconf,
233 	.init = g_journal_init,
234 	.fini = g_journal_fini
235 };
236 
237 static int g_journal_destroy(struct g_journal_softc *sc);
238 static void g_journal_metadata_update(struct g_journal_softc *sc);
239 static void g_journal_start_switcher(struct g_class *mp);
240 static void g_journal_stop_switcher(void);
241 static void g_journal_switch_wait(struct g_journal_softc *sc);
242 
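/* States of the journal switcher kernel thread. */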
243 #define	GJ_SWITCHER_WORKING	0
244 #define	GJ_SWITCHER_DIE		1
245 #define	GJ_SWITCHER_DIED	2
246 static struct proc *g_journal_switcher_proc = NULL;
247 static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
248 static int g_journal_switcher_wokenup = 0;
249 static int g_journal_sync_requested = 0;
250 
251 #ifdef GJ_MEMDEBUG
252 struct meminfo {
253 	size_t		mi_size;
254 	struct stack	mi_stack;
255 };
256 #endif
257 
258 /*
259  * We use our own malloc/realloc/free functions, so we can collect statistics
260  * and force a journal switch when we're running out of cache.
261  */
262 static void *
263 gj_malloc(size_t size, int flags)
264 {
265 	void *p;
266 #ifdef GJ_MEMDEBUG
267 	struct meminfo *mi;
268 #endif
269 
270 	mtx_lock(&g_journal_cache_mtx);
271 	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
272 	    g_journal_cache_used + size > g_journal_cache_low) {
273 		GJ_DEBUG(1, "No cache, waking up the switcher.");
274 		g_journal_switcher_wokenup = 1;
275 		wakeup(&g_journal_switcher_state);
276 	}
277 	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
278 	    g_journal_cache_used + size > g_journal_cache_limit) {
279 		mtx_unlock(&g_journal_cache_mtx);
280 		g_journal_cache_alloc_failures++;
281 		return (NULL);
282 	}
283 	g_journal_cache_used += size;
284 	mtx_unlock(&g_journal_cache_mtx);
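	/*
	 * Past the cache-limit check we always wait for memory; M_NOWAIT only
	 * controls whether the cache limit may make the allocation fail.
	 */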
285 	flags &= ~M_NOWAIT;
286 #ifndef GJ_MEMDEBUG
287 	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
288 #else
289 	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
290 	p = (u_char *)mi + sizeof(*mi);
291 	mi->mi_size = size;
292 	stack_save(&mi->mi_stack);
293 #endif
294 	return (p);
295 }
296 
297 static void
298 gj_free(void *p, size_t size)
299 {
300 #ifdef GJ_MEMDEBUG
301 	struct meminfo *mi;
302 #endif
303 
304 	KASSERT(p != NULL, ("p=NULL"));
305 	KASSERT(size > 0, ("size=0"));
306 	mtx_lock(&g_journal_cache_mtx);
307 	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
308 	g_journal_cache_used -= size;
309 	mtx_unlock(&g_journal_cache_mtx);
310 #ifdef GJ_MEMDEBUG
311 	mi = p = (void *)((u_char *)p - sizeof(*mi));
312 	if (mi->mi_size != size) {
313 		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
314 		    mi->mi_size);
315 		printf("GJOURNAL: Alloc backtrace:\n");
316 		stack_print(&mi->mi_stack);
317 		printf("GJOURNAL: Free backtrace:\n");
318 		kdb_backtrace();
319 	}
320 #endif
321 	free(p, M_JOURNAL);
322 }
323 
324 static void *
325 gj_realloc(void *p, size_t size, size_t oldsize)
326 {
327 	void *np;
328 
329 #ifndef GJ_MEMDEBUG
330 	mtx_lock(&g_journal_cache_mtx);
331 	g_journal_cache_used -= oldsize;
332 	g_journal_cache_used += size;
333 	mtx_unlock(&g_journal_cache_mtx);
334 	np = realloc(p, size, M_JOURNAL, M_WAITOK);
335 #else
336 	np = gj_malloc(size, M_WAITOK);
337 	bcopy(p, np, MIN(oldsize, size));
338 	gj_free(p, oldsize);
339 #endif
340 	return (np);
341 }
342 
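/*
 * Check how much of the active journal is in use and wake up the switcher
 * thread to force a journal switch once usage exceeds the
 * kern.geom.journal.force_switch percentage.  Panics on actual overflow.
 */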
343 static void
344 g_journal_check_overflow(struct g_journal_softc *sc)
345 {
346 	off_t length, used;
347 
348 	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
349 	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
350 	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
351 	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
352 	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
353 		panic("Journal overflow "
354 		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
355 		    (unsigned)sc->sc_id,
356 		    (intmax_t)sc->sc_journal_offset,
357 		    (intmax_t)sc->sc_active.jj_offset,
358 		    (intmax_t)sc->sc_inactive.jj_offset);
359 	}
360 	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
361 		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
362 		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
363 	} else {
364 		length = sc->sc_jend - sc->sc_active.jj_offset;
365 		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
366 		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
367 			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
368 		else {
369 			used = sc->sc_jend - sc->sc_active.jj_offset;
370 			used += sc->sc_journal_offset - sc->sc_jstart;
371 		}
372 	}
373 	/* Already woken up? */
374 	if (g_journal_switcher_wokenup)
375 		return;
376 	/*
377 	 * If the active journal takes more than g_journal_force_switch percent
378 	 * of free journal space, we force a journal switch.
379 	 */
380 	KASSERT(length > 0,
381 	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
382 	    (intmax_t)length, (intmax_t)used,
383 	    (intmax_t)sc->sc_active.jj_offset,
384 	    (intmax_t)sc->sc_inactive.jj_offset,
385 	    (intmax_t)sc->sc_journal_offset));
386 	if ((used * 100) / length > g_journal_force_switch) {
387 		g_journal_stats_journal_full++;
388 		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
389 		    sc->sc_name, (used * 100) / length);
390 		mtx_lock(&g_journal_cache_mtx);
391 		g_journal_switcher_wokenup = 1;
392 		wakeup(&g_journal_switcher_state);
393 		mtx_unlock(&g_journal_cache_mtx);
394 	}
395 }
396 
397 static void
398 g_journal_orphan(struct g_consumer *cp)
399 {
400 	struct g_journal_softc *sc;
401 	char name[256];
402 	int error;
403 
404 	g_topology_assert();
405 	sc = cp->geom->softc;
406 	strlcpy(name, cp->provider->name, sizeof(name));
407 	GJ_DEBUG(0, "Lost provider %s.", name);
408 	if (sc == NULL)
409 		return;
410 	error = g_journal_destroy(sc);
411 	if (error == 0)
412 		GJ_DEBUG(0, "Journal %s destroyed.", name);
413 	else {
414 		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
415 		    "Destroy it manually after last close.", sc->sc_name,
416 		    error);
417 	}
418 }
419 
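/*
 * Access method: refuse new openings of a device that is being destroyed and
 * clear the CLEAN flag (updating metadata) on the first write open.
 */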
420 static int
421 g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
422 {
423 	struct g_journal_softc *sc;
424 	int dcr, dcw, dce;
425 
426 	g_topology_assert();
427 	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
428 	    acr, acw, ace);
429 
430 	dcr = pp->acr + acr;
431 	dcw = pp->acw + acw;
432 	dce = pp->ace + ace;
433 
434 	sc = pp->geom->softc;
435 	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
436 		if (acr <= 0 && acw <= 0 && ace <= 0)
437 			return (0);
438 		else
439 			return (ENXIO);
440 	}
441 	if (pp->acw == 0 && dcw > 0) {
442 		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
443 		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
444 		g_topology_unlock();
445 		g_journal_metadata_update(sc);
446 		g_topology_lock();
447 	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
448 		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
449 		sc->sc_flags |= GJF_DEVICE_CLEAN;
450 		g_topology_unlock();
451 		g_journal_metadata_update(sc);
452 		g_topology_lock();
453 	} */
454 	return (0);
455 }
456 
457 static void
458 g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
459 {
460 
461 	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
462 	data += sizeof(GJ_HEADER_MAGIC);
463 	le32enc(data, hdr->jh_journal_id);
464 	data += 4;
465 	le32enc(data, hdr->jh_journal_next_id);
466 }
467 
468 static int
469 g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
470 {
471 
472 	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
473 	data += sizeof(hdr->jh_magic);
474 	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
475 		return (EINVAL);
476 	hdr->jh_journal_id = le32dec(data);
477 	data += 4;
478 	hdr->jh_journal_next_id = le32dec(data);
479 	return (0);
480 }
481 
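/*
 * Send BIO_FLUSH to the journal and/or data consumers, depending on which
 * GJ_FLUSH_* bits are set in sc_bio_flush.
 */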
482 static void
483 g_journal_flush_cache(struct g_journal_softc *sc)
484 {
485 	struct bintime bt;
486 	int error;
487 
488 	if (sc->sc_bio_flush == 0)
489 		return;
490 	GJ_TIMER_START(1, &bt);
491 	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
492 		error = g_io_flush(sc->sc_jconsumer);
493 		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
494 		    sc->sc_jconsumer->provider->name, error);
495 	}
496 	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
497 		/*
498 		 * TODO: This could be called in parallel with the
499 		 *       previous call.
500 		 */
501 		error = g_io_flush(sc->sc_dconsumer);
502 		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
503 		    sc->sc_dconsumer->provider->name, error);
504 	}
505 	GJ_TIMER_STOP(1, &bt, "Cache flush time");
506 }
507 
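/*
 * Write a journal header (current and next journal IDs) at the current
 * journal offset and advance the offset by one sector.
 */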
508 static int
509 g_journal_write_header(struct g_journal_softc *sc)
510 {
511 	struct g_journal_header hdr;
512 	struct g_consumer *cp;
513 	u_char *buf;
514 	int error;
515 
516 	cp = sc->sc_jconsumer;
517 	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
518 
519 	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
520 	hdr.jh_journal_id = sc->sc_journal_id;
521 	hdr.jh_journal_next_id = sc->sc_journal_next_id;
522 	g_journal_header_encode(&hdr, buf);
523 	error = g_write_data(cp, sc->sc_journal_offset, buf,
524 	    cp->provider->sectorsize);
525 	/* if (error == 0) */
526 	sc->sc_journal_offset += cp->provider->sectorsize;
527 
528 	gj_free(buf, cp->provider->sectorsize);
529 	return (error);
530 }
531 
532 /*
533  * Every journal record has a header and data following it.
534  * The functions below encode the header to little endian before it is stored
535  * on disk and decode it back to system endianness after it is read.
536  */
537 static void
538 g_journal_record_header_encode(struct g_journal_record_header *hdr,
539     u_char *data)
540 {
541 	struct g_journal_entry *ent;
542 	u_int i;
543 
544 	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
545 	data += sizeof(GJ_RECORD_HEADER_MAGIC);
546 	le32enc(data, hdr->jrh_journal_id);
547 	data += 8;
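	/* Only 4 bytes were encoded above, but 8 are skipped; the extra 4 are presumably reserved. */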
548 	le16enc(data, hdr->jrh_nentries);
549 	data += 2;
550 	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
551 	data += 8;
552 	for (i = 0; i < hdr->jrh_nentries; i++) {
553 		ent = &hdr->jrh_entries[i];
554 		le64enc(data, ent->je_joffset);
555 		data += 8;
556 		le64enc(data, ent->je_offset);
557 		data += 8;
558 		le64enc(data, ent->je_length);
559 		data += 8;
560 	}
561 }
562 
563 static int
564 g_journal_record_header_decode(const u_char *data,
565     struct g_journal_record_header *hdr)
566 {
567 	struct g_journal_entry *ent;
568 	u_int i;
569 
570 	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
571 	data += sizeof(hdr->jrh_magic);
572 	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
573 		return (EINVAL);
574 	hdr->jrh_journal_id = le32dec(data);
575 	data += 8;
576 	hdr->jrh_nentries = le16dec(data);
577 	data += 2;
578 	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
579 		return (EINVAL);
580 	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
581 	data += 8;
582 	for (i = 0; i < hdr->jrh_nentries; i++) {
583 		ent = &hdr->jrh_entries[i];
584 		ent->je_joffset = le64dec(data);
585 		data += 8;
586 		ent->je_offset = le64dec(data);
587 		data += 8;
588 		ent->je_length = le64dec(data);
589 		data += 8;
590 	}
591 	return (0);
592 }
593 
594 /*
595  * The function reads metadata from a provider (via the given consumer),
596  * decodes it to system endianness and verifies its correctness.
597  */
598 static int
599 g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
600 {
601 	struct g_provider *pp;
602 	u_char *buf;
603 	int error;
604 
605 	g_topology_assert();
606 
607 	error = g_access(cp, 1, 0, 0);
608 	if (error != 0)
609 		return (error);
610 	pp = cp->provider;
611 	g_topology_unlock();
612 	/* Metadata is stored in the last sector. */
613 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
614 	    &error);
615 	g_topology_lock();
616 	g_access(cp, -1, 0, 0);
617 	if (buf == NULL) {
618 		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
619 		    cp->provider->name, error);
620 		return (error);
621 	}
622 
623 	/* Decode metadata. */
624 	error = journal_metadata_decode(buf, md);
625 	g_free(buf);
626 	/* Is this a gjournal provider at all? */
627 	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
628 		return (EINVAL);
629 	/*
630 	 * Are we able to handle this version of metadata?
631 	 * We only maintain backward compatibility.
632 	 */
633 	if (md->md_version > G_JOURNAL_VERSION) {
634 		GJ_DEBUG(0,
635 		    "Kernel module is too old to handle metadata from %s.",
636 		    cp->provider->name);
637 		return (EINVAL);
638 	}
639 	/* Is checksum correct? */
640 	if (error != 0) {
641 		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
642 		    cp->provider->name);
643 		return (error);
644 	}
645 	return (0);
646 }
647 
648 /*
649  * The two functions below are responsible for updating metadata.
650  * Only metadata on the data provider is updated (we need to update
651  * information about the active journal there).
652  */
653 static void
654 g_journal_metadata_done(struct bio *bp)
655 {
656 
657 	/*
658 	 * There is not much we can do on error except report it.
659 	 */
660 	if (bp->bio_error != 0) {
661 		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
662 		    bp->bio_error);
663 	} else {
664 		GJ_LOGREQ(2, bp, "Metadata updated.");
665 	}
666 	gj_free(bp->bio_data, bp->bio_length);
667 	g_destroy_bio(bp);
668 }
669 
670 static void
671 g_journal_metadata_update(struct g_journal_softc *sc)
672 {
673 	struct g_journal_metadata md;
674 	struct g_consumer *cp;
675 	struct bio *bp;
676 	u_char *sector;
677 
678 	cp = sc->sc_dconsumer;
679 	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
680 	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
681 	md.md_version = G_JOURNAL_VERSION;
682 	md.md_id = sc->sc_id;
683 	md.md_type = sc->sc_orig_type;
684 	md.md_jstart = sc->sc_jstart;
685 	md.md_jend = sc->sc_jend;
686 	md.md_joffset = sc->sc_inactive.jj_offset;
687 	md.md_jid = sc->sc_journal_previous_id;
688 	md.md_flags = 0;
689 	if (sc->sc_flags & GJF_DEVICE_CLEAN)
690 		md.md_flags |= GJ_FLAG_CLEAN;
691 
692 	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
693 		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
694 	else
695 		bzero(md.md_provider, sizeof(md.md_provider));
696 	md.md_provsize = cp->provider->mediasize;
697 	journal_metadata_encode(&md, sector);
698 
699 	/*
700 	 * Flush the cache, so we know all data are on disk.
701 	 * We write information like "journal is consistent" here, so we need
702 	 * to be sure it is. Without BIO_FLUSH here, we can end up in a situation
703 	 * where metadata is stored on disk, but not all data.
704 	 */
705 	g_journal_flush_cache(sc);
706 
707 	bp = g_alloc_bio();
708 	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
709 	bp->bio_length = cp->provider->sectorsize;
710 	bp->bio_data = sector;
711 	bp->bio_cmd = BIO_WRITE;
712 	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
713 		bp->bio_done = g_journal_metadata_done;
714 		g_io_request(bp, cp);
715 	} else {
716 		bp->bio_done = NULL;
717 		g_io_request(bp, cp);
718 		biowait(bp, "gjmdu");
719 		g_journal_metadata_done(bp);
720 	}
721 
722 	/*
723 	 * Be sure metadata reached the disk.
724 	 */
725 	g_journal_flush_cache(sc);
726 }
727 
728 /*
729  * This is where the I/O request comes from the GEOM.
730  */
731 static void
732 g_journal_start(struct bio *bp)
733 {
734 	struct g_journal_softc *sc;
735 
736 	sc = bp->bio_to->geom->softc;
737 	GJ_LOGREQ(3, bp, "Request received.");
738 
739 	switch (bp->bio_cmd) {
740 	case BIO_READ:
741 	case BIO_WRITE:
742 		mtx_lock(&sc->sc_mtx);
743 		bioq_insert_tail(&sc->sc_regular_queue, bp);
744 		wakeup(sc);
745 		mtx_unlock(&sc->sc_mtx);
746 		return;
747 	case BIO_GETATTR:
748 		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
749 			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
750 			bp->bio_completed = strlen(bp->bio_to->name) + 1;
751 			g_io_deliver(bp, 0);
752 			return;
753 		}
754 		/* FALLTHROUGH */
755 	case BIO_SPEEDUP:
756 	case BIO_DELETE:
757 	default:
758 		g_io_deliver(bp, EOPNOTSUPP);
759 		return;
760 	}
761 }
762 
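/*
 * Generic completion handler: hand the finished bio back to the worker thread
 * via the back queue.
 */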
763 static void
764 g_journal_std_done(struct bio *bp)
765 {
766 	struct g_journal_softc *sc;
767 
768 	sc = bp->bio_from->geom->softc;
769 	mtx_lock(&sc->sc_mtx);
770 	bioq_insert_tail(&sc->sc_back_queue, bp);
771 	wakeup(sc);
772 	mtx_unlock(&sc->sc_mtx);
773 }
774 
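/*
 * Allocate a BIO_WRITE bio describing the [start, end) range, optionally
 * copying the supplied data into a freshly allocated buffer.
 */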
775 static struct bio *
776 g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
777     int flags)
778 {
779 	struct bio *bp;
780 
781 	bp = g_alloc_bio();
782 	bp->bio_offset = start;
783 	bp->bio_joffset = joffset;
784 	bp->bio_length = end - start;
785 	bp->bio_cmd = BIO_WRITE;
786 	bp->bio_done = g_journal_std_done;
787 	if (data == NULL)
788 		bp->bio_data = NULL;
789 	else {
790 		bp->bio_data = gj_malloc(bp->bio_length, flags);
791 		if (bp->bio_data != NULL)
792 			bcopy(data, bp->bio_data, bp->bio_length);
793 	}
794 	return (bp);
795 }
796 
797 #define	g_journal_insert_bio(head, bp, flags)				\
798 	g_journal_insert((head), (bp)->bio_offset,			\
799 		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
800 		(bp)->bio_data, flags)
801 /*
802  * The function below does a lot more than just insert a bio into the queue.
803  * It keeps the queue sorted by offset and ensures that no data is
804  * duplicated (it combines bios where ranges overlap).
805  *
806  * The function returns the number of bios inserted (as a bio can be split).
807  */
808 static int
809 g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
810     u_char *data, int flags)
811 {
812 	struct bio *nbp, *cbp, *pbp;
813 	off_t cstart, cend;
814 	u_char *tmpdata;
815 	int n;
816 
817 	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
818 	    joffset);
819 	n = 0;
820 	pbp = NULL;
821 	GJQ_FOREACH(*head, cbp) {
822 		cstart = cbp->bio_offset;
823 		cend = cbp->bio_offset + cbp->bio_length;
824 
825 		if (nstart >= cend) {
826 			/*
827 			 *  +-------------+
828 			 *  |             |
829 			 *  |   current   |  +-------------+
830 			 *  |     bio     |  |             |
831 			 *  |             |  |     new     |
832 			 *  +-------------+  |     bio     |
833 			 *                   |             |
834 			 *                   +-------------+
835 			 */
836 			GJ_DEBUG(3, "INSERT(%p): 1", *head);
837 		} else if (nend <= cstart) {
838 			/*
839 			 *                   +-------------+
840 			 *                   |             |
841 			 *  +-------------+  |   current   |
842 			 *  |             |  |     bio     |
843 			 *  |     new     |  |             |
844 			 *  |     bio     |  +-------------+
845 			 *  |             |
846 			 *  +-------------+
847 			 */
848 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
849 			    flags);
850 			if (pbp == NULL)
851 				*head = nbp;
852 			else
853 				pbp->bio_next = nbp;
854 			nbp->bio_next = cbp;
855 			n++;
856 			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
857 			    pbp);
858 			goto end;
859 		} else if (nstart <= cstart && nend >= cend) {
860 			/*
861 			 *      +-------------+      +-------------+
862 			 *      | current bio |      | current bio |
863 			 *  +---+-------------+---+  +-------------+---+
864 			 *  |   |             |   |  |             |   |
865 			 *  |   |             |   |  |             |   |
866 			 *  |   +-------------+   |  +-------------+   |
867 			 *  |       new bio       |  |     new bio     |
868 			 *  +---------------------+  +-----------------+
869 			 *
870 			 *      +-------------+  +-------------+
871 			 *      | current bio |  | current bio |
872 			 *  +---+-------------+  +-------------+
873 			 *  |   |             |  |             |
874 			 *  |   |             |  |             |
875 			 *  |   +-------------+  +-------------+
876 			 *  |     new bio     |  |   new bio   |
877 			 *  +-----------------+  +-------------+
878 			 */
879 			g_journal_stats_bytes_skipped += cbp->bio_length;
880 			cbp->bio_offset = nstart;
881 			cbp->bio_joffset = joffset;
882 			cbp->bio_length = cend - nstart;
883 			if (cbp->bio_data != NULL) {
884 				gj_free(cbp->bio_data, cend - cstart);
885 				cbp->bio_data = NULL;
886 			}
887 			if (data != NULL) {
888 				cbp->bio_data = gj_malloc(cbp->bio_length,
889 				    flags);
890 				if (cbp->bio_data != NULL) {
891 					bcopy(data, cbp->bio_data,
892 					    cbp->bio_length);
893 				}
894 				data += cend - nstart;
895 			}
896 			joffset += cend - nstart;
897 			nstart = cend;
898 			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
899 		} else if (nstart > cstart && nend >= cend) {
900 			/*
901 			 *  +-----------------+  +-------------+
902 			 *  |   current bio   |  | current bio |
903 			 *  |   +-------------+  |   +---------+---+
904 			 *  |   |             |  |   |         |   |
905 			 *  |   |             |  |   |         |   |
906 			 *  +---+-------------+  +---+---------+   |
907 			 *      |   new bio   |      |   new bio   |
908 			 *      +-------------+      +-------------+
909 			 */
910 			g_journal_stats_bytes_skipped += cend - nstart;
911 			nbp = g_journal_new_bio(nstart, cend, joffset, data,
912 			    flags);
913 			nbp->bio_next = cbp->bio_next;
914 			cbp->bio_next = nbp;
915 			cbp->bio_length = nstart - cstart;
916 			if (cbp->bio_data != NULL) {
917 				cbp->bio_data = gj_realloc(cbp->bio_data,
918 				    cbp->bio_length, cend - cstart);
919 			}
920 			if (data != NULL)
921 				data += cend - nstart;
922 			joffset += cend - nstart;
923 			nstart = cend;
924 			n++;
925 			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
926 		} else if (nstart > cstart && nend < cend) {
927 			/*
928 			 *  +---------------------+
929 			 *  |     current bio     |
930 			 *  |   +-------------+   |
931 			 *  |   |             |   |
932 			 *  |   |             |   |
933 			 *  +---+-------------+---+
934 			 *      |   new bio   |
935 			 *      +-------------+
936 			 */
937 			g_journal_stats_bytes_skipped += nend - nstart;
938 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
939 			    flags);
940 			nbp->bio_next = cbp->bio_next;
941 			cbp->bio_next = nbp;
942 			if (cbp->bio_data == NULL)
943 				tmpdata = NULL;
944 			else
945 				tmpdata = cbp->bio_data + nend - cstart;
946 			nbp = g_journal_new_bio(nend, cend,
947 			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
948 			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
949 			((struct bio *)cbp->bio_next)->bio_next = nbp;
950 			cbp->bio_length = nstart - cstart;
951 			if (cbp->bio_data != NULL) {
952 				cbp->bio_data = gj_realloc(cbp->bio_data,
953 				    cbp->bio_length, cend - cstart);
954 			}
955 			n += 2;
956 			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
957 			goto end;
958 		} else if (nstart <= cstart && nend < cend) {
959 			/*
960 			 *  +-----------------+      +-------------+
961 			 *  |   current bio   |      | current bio |
962 			 *  +-------------+   |  +---+---------+   |
963 			 *  |             |   |  |   |         |   |
964 			 *  |             |   |  |   |         |   |
965 			 *  +-------------+---+  |   +---------+---+
966 			 *  |   new bio   |      |   new bio   |
967 			 *  +-------------+      +-------------+
968 			 */
969 			g_journal_stats_bytes_skipped += nend - nstart;
970 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
971 			    flags);
972 			if (pbp == NULL)
973 				*head = nbp;
974 			else
975 				pbp->bio_next = nbp;
976 			nbp->bio_next = cbp;
977 			cbp->bio_offset = nend;
978 			cbp->bio_length = cend - nend;
979 			cbp->bio_joffset += nend - cstart;
980 			tmpdata = cbp->bio_data;
981 			if (tmpdata != NULL) {
982 				cbp->bio_data = gj_malloc(cbp->bio_length,
983 				    flags);
984 				if (cbp->bio_data != NULL) {
985 					bcopy(tmpdata + nend - cstart,
986 					    cbp->bio_data, cbp->bio_length);
987 				}
988 				gj_free(tmpdata, cend - cstart);
989 			}
990 			n++;
991 			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
992 			goto end;
993 		}
994 		if (nstart == nend)
995 			goto end;
996 		pbp = cbp;
997 	}
998 	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
999 	if (pbp == NULL)
1000 		*head = nbp;
1001 	else
1002 		pbp->bio_next = nbp;
1003 	nbp->bio_next = NULL;
1004 	n++;
1005 	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
1006 end:
1007 	if (g_journal_debug >= 3) {
1008 		GJQ_FOREACH(*head, cbp) {
1009 			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
1010 			    (intmax_t)cbp->bio_offset,
1011 			    (intmax_t)cbp->bio_length,
1012 			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
1013 		}
1014 		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
1015 	}
1016 	return (n);
1017 }
1018 
1019 /*
1020  * The function combines neighbour bios trying to squeeze as much data as
1021  * The function combines neighbouring bios, trying to squeeze as much data as
1022  *
1023  * The function returns the number of bios combined (negative value).
1024  */
1025 static int
1026 g_journal_optimize(struct bio *head)
1027 {
1028 	struct bio *cbp, *pbp;
1029 	int n;
1030 
1031 	n = 0;
1032 	pbp = NULL;
1033 	GJQ_FOREACH(head, cbp) {
1034 		/* Skip bios which have to be read first. */
1035 		if (cbp->bio_data == NULL) {
1036 			pbp = NULL;
1037 			continue;
1038 		}
1039 		/* There is no previous bio yet. */
1040 		if (pbp == NULL) {
1041 			pbp = cbp;
1042 			continue;
1043 		}
1044 		/* Is this a neighbour bio? */
1045 		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
1046 			/* Be sure that bios queue is sorted. */
1047 			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
1048 			    ("poffset=%jd plength=%jd coffset=%jd",
1049 			    (intmax_t)pbp->bio_offset,
1050 			    (intmax_t)pbp->bio_length,
1051 			    (intmax_t)cbp->bio_offset));
1052 			pbp = cbp;
1053 			continue;
1054 		}
1055 		/* Be sure we don't end up with a bio that is too big. */
1056 		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
1057 			pbp = cbp;
1058 			continue;
1059 		}
1060 		/* Ok, we can join bios. */
1061 		GJ_LOGREQ(4, pbp, "Join: ");
1062 		GJ_LOGREQ(4, cbp, "and: ");
1063 		pbp->bio_data = gj_realloc(pbp->bio_data,
1064 		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
1065 		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
1066 		    cbp->bio_length);
1067 		gj_free(cbp->bio_data, cbp->bio_length);
1068 		pbp->bio_length += cbp->bio_length;
1069 		pbp->bio_next = cbp->bio_next;
1070 		g_destroy_bio(cbp);
1071 		cbp = pbp;
1072 		g_journal_stats_combined_ios++;
1073 		n--;
1074 		GJ_LOGREQ(4, pbp, "Got: ");
1075 	}
1076 	return (n);
1077 }
1078 
1079 /*
1080  * TODO: Update comment.
1081  * These are functions responsible for copying one portion of data from the
1082  * journal to the destination provider.
1083  * The order goes like this:
1084  * 1. Read the header, which contains information about the data blocks
1085  *    following it.
1086  * 2. Read the data blocks from the journal.
1087  * 3. Write the data blocks on the data provider.
1088  *
1089  * g_journal_copy_start()
1090  * g_journal_copy_done() - got finished write request, logs potential errors.
1091  */
1092 
1093 /*
1094  * When there is no data in cache, this function is used to read it.
1095  */
1096 static void
1097 g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
1098 {
1099 	struct bio *cbp;
1100 
1101 	/*
1102 	 * We were short on memory, so the data was freed.
1103 	 * In that case we need to read it back from the journal.
1104 	 */
1105 	cbp = g_alloc_bio();
1106 	cbp->bio_cflags = bp->bio_cflags;
1107 	cbp->bio_parent = bp;
1108 	cbp->bio_offset = bp->bio_joffset;
1109 	cbp->bio_length = bp->bio_length;
1110 	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
1111 	cbp->bio_cmd = BIO_READ;
1112 	cbp->bio_done = g_journal_std_done;
1113 	GJ_LOGREQ(4, cbp, "READ FIRST");
1114 	g_io_request(cbp, sc->sc_jconsumer);
1115 	g_journal_cache_misses++;
1116 }
1117 
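/*
 * Take up to kern.geom.journal.parallel_copies bios from the inactive journal
 * queue, optionally combine them, and send them to the data provider (reading
 * the data back from the journal first if it is no longer cached).
 */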
1118 static void
1119 g_journal_copy_send(struct g_journal_softc *sc)
1120 {
1121 	struct bio *bioq, *bp, *lbp;
1122 
1123 	bioq = lbp = NULL;
1124 	mtx_lock(&sc->sc_mtx);
1125 	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
1126 		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
1127 		if (bp == NULL)
1128 			break;
1129 		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
1130 		sc->sc_copy_in_progress++;
1131 		GJQ_INSERT_AFTER(bioq, bp, lbp);
1132 		lbp = bp;
1133 	}
1134 	mtx_unlock(&sc->sc_mtx);
1135 	if (g_journal_do_optimize)
1136 		sc->sc_copy_in_progress += g_journal_optimize(bioq);
1137 	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1138 		GJQ_REMOVE(bioq, bp);
1139 		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
1140 		bp->bio_cflags = GJ_BIO_COPY;
1141 		if (bp->bio_data == NULL)
1142 			g_journal_read_first(sc, bp);
1143 		else {
1144 			bp->bio_joffset = 0;
1145 			GJ_LOGREQ(4, bp, "SEND");
1146 			g_io_request(bp, sc->sc_dconsumer);
1147 		}
1148 	}
1149 }
1150 
1151 static void
1152 g_journal_copy_start(struct g_journal_softc *sc)
1153 {
1154 
1155 	/*
1156 	 * Remember in metadata that we're starting to copy journaled data
1157 	 * to the data provider.
1158 	 * In case of power failure, we will copy this data once again on boot.
1159 	 */
1160 	if (!sc->sc_journal_copying) {
1161 		sc->sc_journal_copying = 1;
1162 		GJ_DEBUG(1, "Starting copy of journal.");
1163 		g_journal_metadata_update(sc);
1164 	}
1165 	g_journal_copy_send(sc);
1166 }
1167 
1168 /*
1169  * Data block has been read from the journal provider.
1170  */
1171 static int
1172 g_journal_copy_read_done(struct bio *bp)
1173 {
1174 	struct g_journal_softc *sc;
1175 	struct g_consumer *cp;
1176 	struct bio *pbp;
1177 
1178 	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1179 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1180 
1181 	sc = bp->bio_from->geom->softc;
1182 	pbp = bp->bio_parent;
1183 
1184 	if (bp->bio_error != 0) {
1185 		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1186 		    bp->bio_to->name, bp->bio_error);
1187 		/*
1188 		 * We will not be able to deliver the WRITE request either.
1189 		 */
1190 		gj_free(bp->bio_data, bp->bio_length);
1191 		g_destroy_bio(pbp);
1192 		g_destroy_bio(bp);
1193 		sc->sc_copy_in_progress--;
1194 		return (1);
1195 	}
1196 	pbp->bio_data = bp->bio_data;
1197 	cp = sc->sc_dconsumer;
1198 	g_io_request(pbp, cp);
1199 	GJ_LOGREQ(4, bp, "READ DONE");
1200 	g_destroy_bio(bp);
1201 	return (0);
1202 }
1203 
1204 /*
1205  * Data block has been written to the data provider.
1206  */
1207 static void
1208 g_journal_copy_write_done(struct bio *bp)
1209 {
1210 	struct g_journal_softc *sc;
1211 
1212 	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1213 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1214 
1215 	sc = bp->bio_from->geom->softc;
1216 	sc->sc_copy_in_progress--;
1217 
1218 	if (bp->bio_error != 0) {
1219 		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
1220 		    bp->bio_error);
1221 	}
1222 	GJQ_REMOVE(sc->sc_copy_queue, bp);
1223 	gj_free(bp->bio_data, bp->bio_length);
1224 	GJ_LOGREQ(4, bp, "DONE");
1225 	g_destroy_bio(bp);
1226 
1227 	if (sc->sc_copy_in_progress == 0) {
1228 		/*
1229 		 * This was the last write request for this journal.
1230 		 */
1231 		GJ_DEBUG(1, "Data has been copied.");
1232 		sc->sc_journal_copying = 0;
1233 	}
1234 }
1235 
1236 static void g_journal_flush_done(struct bio *bp);
1237 
1238 /*
1239  * Flush one record onto the active journal provider.
1240  */
1241 static void
1242 g_journal_flush(struct g_journal_softc *sc)
1243 {
1244 	struct g_journal_record_header hdr;
1245 	struct g_journal_entry *ent;
1246 	struct g_provider *pp;
1247 	struct bio **bioq;
1248 	struct bio *bp, *fbp, *pbp;
1249 	off_t joffset;
1250 	u_char *data, hash[16];
1251 	MD5_CTX ctx;
1252 	u_int i;
1253 
1254 	if (sc->sc_current_count == 0)
1255 		return;
1256 
1257 	pp = sc->sc_jprovider;
1258 	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1259 	joffset = sc->sc_journal_offset;
1260 
1261 	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
1262 	    sc->sc_current_count, pp->name, (intmax_t)joffset);
1263 
1264 	/*
1265 	 * Store 'journal id', so we know to which journal this record belongs.
1266 	 */
1267 	hdr.jrh_journal_id = sc->sc_journal_id;
1268 	/* Could be less than g_journal_record_entries if called due to a timeout. */
1269 	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
1270 	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));
1271 
1272 	bioq = &sc->sc_active.jj_queue;
1273 	GJQ_LAST(sc->sc_flush_queue, pbp);
1274 
1275 	fbp = g_alloc_bio();
1276 	fbp->bio_parent = NULL;
1277 	fbp->bio_cflags = GJ_BIO_JOURNAL;
1278 	fbp->bio_offset = -1;
1279 	fbp->bio_joffset = joffset;
1280 	fbp->bio_length = pp->sectorsize;
1281 	fbp->bio_cmd = BIO_WRITE;
1282 	fbp->bio_done = g_journal_std_done;
1283 	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
1284 	pbp = fbp;
1285 	fbp->bio_to = pp;
1286 	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
1287 	joffset += pp->sectorsize;
1288 	sc->sc_flush_count++;
1289 	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1290 		MD5Init(&ctx);
1291 
1292 	for (i = 0; i < hdr.jrh_nentries; i++) {
1293 		bp = sc->sc_current_queue;
1294 		KASSERT(bp != NULL, ("NULL bp"));
1295 		bp->bio_to = pp;
1296 		GJ_LOGREQ(4, bp, "FLUSHED");
1297 		sc->sc_current_queue = bp->bio_next;
1298 		bp->bio_next = NULL;
1299 		sc->sc_current_count--;
1300 
1301 		/* Add to the header. */
1302 		ent = &hdr.jrh_entries[i];
1303 		ent->je_offset = bp->bio_offset;
1304 		ent->je_joffset = joffset;
1305 		ent->je_length = bp->bio_length;
1306 
1307 		data = bp->bio_data;
1308 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1309 			MD5Update(&ctx, data, ent->je_length);
1310 		g_reset_bio(bp);
1311 		bp->bio_cflags = GJ_BIO_JOURNAL;
1312 		bp->bio_offset = ent->je_offset;
1313 		bp->bio_joffset = ent->je_joffset;
1314 		bp->bio_length = ent->je_length;
1315 		bp->bio_data = data;
1316 		bp->bio_cmd = BIO_WRITE;
1317 		bp->bio_done = g_journal_std_done;
1318 		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
1319 		pbp = bp;
1320 		bp->bio_to = pp;
1321 		GJ_LOGREQ(4, bp, "FLUSH_OUT");
1322 		joffset += bp->bio_length;
1323 		sc->sc_flush_count++;
1324 
1325 		/*
1326 		 * Add the request to the active journal queue.
1327 		 * This is our cache. After a journal switch we don't have to
1328 		 * read the data from the inactive journal, because we keep
1329 		 * it in memory.
1330 		 */
1331 		g_journal_insert(bioq, ent->je_offset,
1332 		    ent->je_offset + ent->je_length, ent->je_joffset, data,
1333 		    M_NOWAIT);
1334 	}
1335 
1336 	/*
1337 	 * After all requests, store valid header.
1338 	 */
1339 	data = gj_malloc(pp->sectorsize, M_WAITOK);
1340 	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1341 		MD5Final(hash, &ctx);
1342 		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
1343 	}
1344 	g_journal_record_header_encode(&hdr, data);
1345 	fbp->bio_data = data;
1346 
1347 	sc->sc_journal_offset = joffset;
1348 
1349 	g_journal_check_overflow(sc);
1350 }
1351 
1352 /*
1353  * Flush request finished.
1354  */
1355 static void
1356 g_journal_flush_done(struct bio *bp)
1357 {
1358 	struct g_journal_softc *sc;
1359 	struct g_consumer *cp;
1360 
1361 	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
1362 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));
1363 
1364 	cp = bp->bio_from;
1365 	sc = cp->geom->softc;
1366 	sc->sc_flush_in_progress--;
1367 
1368 	if (bp->bio_error != 0) {
1369 		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
1370 		    bp->bio_error);
1371 	}
1372 	gj_free(bp->bio_data, bp->bio_length);
1373 	GJ_LOGREQ(4, bp, "DONE");
1374 	g_destroy_bio(bp);
1375 }
1376 
1377 static void g_journal_release_delayed(struct g_journal_softc *sc);
1378 
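/*
 * Send queued flush requests to the active journal, keeping at most
 * kern.geom.journal.parallel_flushes of them in flight and releasing delayed
 * requests as room becomes available.
 */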
1379 static void
1380 g_journal_flush_send(struct g_journal_softc *sc)
1381 {
1382 	struct g_consumer *cp;
1383 	struct bio *bioq, *bp, *lbp;
1384 
1385 	cp = sc->sc_jconsumer;
1386 	bioq = lbp = NULL;
1387 	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
1388 		/* Send one flush request to the active journal. */
1389 		bp = GJQ_FIRST(sc->sc_flush_queue);
1390 		if (bp != NULL) {
1391 			GJQ_REMOVE(sc->sc_flush_queue, bp);
1392 			sc->sc_flush_count--;
1393 			bp->bio_offset = bp->bio_joffset;
1394 			bp->bio_joffset = 0;
1395 			sc->sc_flush_in_progress++;
1396 			GJQ_INSERT_AFTER(bioq, bp, lbp);
1397 			lbp = bp;
1398 		}
1399 		/* Try to release delayed requests. */
1400 		g_journal_release_delayed(sc);
1401 		/* If there are no requests to flush, leave. */
1402 		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
1403 			break;
1404 	}
1405 	if (g_journal_do_optimize)
1406 		sc->sc_flush_in_progress += g_journal_optimize(bioq);
1407 	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1408 		GJQ_REMOVE(bioq, bp);
1409 		GJ_LOGREQ(3, bp, "Flush request send");
1410 		g_io_request(bp, cp);
1411 	}
1412 }
1413 
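/*
 * Insert a write request into the current queue, acknowledge it to the caller
 * immediately and flush a record once enough entries have accumulated.
 */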
1414 static void
1415 g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
1416 {
1417 	int n;
1418 
1419 	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
1420 	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
1421 	sc->sc_current_count += n;
1422 	n = g_journal_optimize(sc->sc_current_queue);
1423 	sc->sc_current_count += n;
1424 	/*
1425 	 * For requests which are added to the current queue we deliver the
1426 	 * response immediately.
1427 	 */
1428 	bp->bio_completed = bp->bio_length;
1429 	g_io_deliver(bp, 0);
1430 	if (sc->sc_current_count >= g_journal_record_entries) {
1431 		/*
1432 		 * Let's flush one record onto active journal provider.
1433 		 */
1434 		g_journal_flush(sc);
1435 	}
1436 }
1437 
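/*
 * Move delayed requests to the current queue while there is room in the
 * flush queue.
 */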
1438 static void
1439 g_journal_release_delayed(struct g_journal_softc *sc)
1440 {
1441 	struct bio *bp;
1442 
1443 	for (;;) {
1444 		/* The flush queue is full, exit. */
1445 		if (sc->sc_flush_count >= g_journal_accept_immediately)
1446 			return;
1447 		bp = bioq_takefirst(&sc->sc_delayed_queue);
1448 		if (bp == NULL)
1449 			return;
1450 		sc->sc_delayed_count--;
1451 		g_journal_add_current(sc, bp);
1452 	}
1453 }
1454 
1455 /*
1456  * Add an I/O request to the current queue. If we have enough requests for one
1457  * journal record, we flush them onto the active journal provider.
1458  */
1459 static void
1460 g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
1461 {
1462 
1463 	/*
1464 	 * The flush queue is full, we need to delay the request.
1465 	 */
1466 	if (sc->sc_delayed_count > 0 ||
1467 	    sc->sc_flush_count >= g_journal_accept_immediately) {
1468 		GJ_LOGREQ(4, bp, "DELAYED");
1469 		bioq_insert_tail(&sc->sc_delayed_queue, bp);
1470 		sc->sc_delayed_count++;
1471 		return;
1472 	}
1473 
1474 	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
1475 	    ("DELAYED queue not empty."));
1476 	g_journal_add_current(sc, bp);
1477 }
1478 
1479 static void g_journal_read_done(struct bio *bp);
1480 
1481 /*
1482  * Try to find requested data in cache.
1483  */
1484 static struct bio *
1485 g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
1486     off_t oend)
1487 {
1488 	off_t cstart, cend;
1489 	struct bio *bp;
1490 
1491 	GJQ_FOREACH(head, bp) {
1492 		if (bp->bio_offset == -1)
1493 			continue;
1494 		cstart = MAX(ostart, bp->bio_offset);
1495 		cend = MIN(oend, bp->bio_offset + bp->bio_length);
1496 		if (cend <= ostart)
1497 			continue;
1498 		else if (cstart >= oend) {
1499 			if (!sorted)
1500 				continue;
1501 			else {
1502 				bp = NULL;
1503 				break;
1504 			}
1505 		}
1506 		if (bp->bio_data == NULL)
1507 			break;
1508 		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1509 		    bp);
1510 		bcopy(bp->bio_data + cstart - bp->bio_offset,
1511 		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1512 		pbp->bio_completed += cend - cstart;
1513 		if (pbp->bio_completed == pbp->bio_length) {
1514 			/*
1515 			 * Cool, the whole request was in cache, deliver happy
1516 			 * message.
1517 			 */
1518 			g_io_deliver(pbp, 0);
1519 			return (pbp);
1520 		}
1521 		break;
1522 	}
1523 	return (bp);
1524 }
1525 
1526 /*
1527  * This function is used for collecting data on read.
1528  * The complexity comes from the fact that parts of the data can be stored in
1529  * four different places:
1530  * - in memory - the data not yet sent to the active journal provider
1531  * - in the active journal
1532  * - in the inactive journal
1533  * - in the data provider
1534  */
1535 static void
1536 g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
1537     off_t oend)
1538 {
1539 	struct bio *bp, *nbp, *head;
1540 	off_t cstart, cend;
1541 	u_int i, sorted = 0;
1542 
1543 	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);
1544 
1545 	cstart = cend = -1;
1546 	bp = NULL;
1547 	head = NULL;
1548 	for (i = 1; i <= 5; i++) {
1549 		switch (i) {
1550 		case 1:	/* Not-yet-sent data. */
1551 			head = sc->sc_current_queue;
1552 			sorted = 1;
1553 			break;
1554 		case 2: /* Skip the flush queue; its bios are also in the active queue. */
1555 			continue;
1556 		case 3:	/* Active journal. */
1557 			head = sc->sc_active.jj_queue;
1558 			sorted = 1;
1559 			break;
1560 		case 4:	/* Inactive journal. */
1561 			/*
1562 			 * XXX: Here could be a race with g_journal_lowmem().
1563 			 */
1564 			head = sc->sc_inactive.jj_queue;
1565 			sorted = 1;
1566 			break;
1567 		case 5:	/* In-flight to the data provider. */
1568 			head = sc->sc_copy_queue;
1569 			sorted = 0;
1570 			break;
1571 		default:
1572 			panic("gjournal %s: i=%d", __func__, i);
1573 		}
1574 		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
1575 		if (bp == pbp) { /* Got the whole request. */
1576 			GJ_DEBUG(2, "Got the whole request from %u.", i);
1577 			return;
1578 		} else if (bp != NULL) {
1579 			cstart = MAX(ostart, bp->bio_offset);
1580 			cend = MIN(oend, bp->bio_offset + bp->bio_length);
1581 			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
1582 			    i, (intmax_t)cstart, (intmax_t)cend);
1583 			break;
1584 		}
1585 	}
1586 	if (bp != NULL) {
1587 		if (bp->bio_data == NULL) {
1588 			nbp = g_duplicate_bio(pbp);
1589 			nbp->bio_cflags = GJ_BIO_READ;
1590 			nbp->bio_data =
1591 			    pbp->bio_data + cstart - pbp->bio_offset;
1592 			nbp->bio_offset =
1593 			    bp->bio_joffset + cstart - bp->bio_offset;
1594 			nbp->bio_length = cend - cstart;
1595 			nbp->bio_done = g_journal_read_done;
1596 			g_io_request(nbp, sc->sc_jconsumer);
1597 		}
1598 		/*
1599 		 * If we don't have the whole request yet, call g_journal_read()
1600 		 * recursively.
1601 		 */
1602 		if (ostart < cstart)
1603 			g_journal_read(sc, pbp, ostart, cstart);
1604 		if (oend > cend)
1605 			g_journal_read(sc, pbp, cend, oend);
1606 	} else {
1607 		/*
1608 		 * No data in memory, no data in the journal.
1609 		 * It's time to ask the data provider.
1610 		 */
1611 		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
1612 		nbp = g_duplicate_bio(pbp);
1613 		nbp->bio_cflags = GJ_BIO_READ;
1614 		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
1615 		nbp->bio_offset = ostart;
1616 		nbp->bio_length = oend - ostart;
1617 		nbp->bio_done = g_journal_read_done;
1618 		g_io_request(nbp, sc->sc_dconsumer);
1619 		/* We have the whole request, return here. */
1620 		return;
1621 	}
1622 }
1623 
1624 /*
1625  * Function responsible for handling finished READ requests.
1626  * Actually, g_std_done() could be used here; the only difference is that we
1627  * log the error.
1628  */
1629 static void
1630 g_journal_read_done(struct bio *bp)
1631 {
1632 	struct bio *pbp;
1633 
1634 	KASSERT(bp->bio_cflags == GJ_BIO_READ,
1635 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));
1636 
1637 	pbp = bp->bio_parent;
1638 	pbp->bio_inbed++;
1639 	pbp->bio_completed += bp->bio_length;
1640 
1641 	if (bp->bio_error != 0) {
1642 		if (pbp->bio_error == 0)
1643 			pbp->bio_error = bp->bio_error;
1644 		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1645 		    bp->bio_to->name, bp->bio_error);
1646 	}
1647 	g_destroy_bio(bp);
1648 	if (pbp->bio_children == pbp->bio_inbed &&
1649 	    pbp->bio_completed == pbp->bio_length) {
1650 		/* We're done. */
1651 		g_io_deliver(pbp, 0);
1652 	}
1653 }
1654 
1655 /*
1656  * Deactivate the current journal and activate the next one.
1657  */
1658 static void
1659 g_journal_switch(struct g_journal_softc *sc)
1660 {
1661 	struct g_provider *pp;
1662 
1663 	if (JEMPTY(sc)) {
1664 		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
1665 		pp = LIST_FIRST(&sc->sc_geom->provider);
1666 		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
1667 			sc->sc_flags |= GJF_DEVICE_CLEAN;
1668 			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
1669 			g_journal_metadata_update(sc);
1670 		}
1671 	} else {
1672 		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);
1673 
1674 		pp = sc->sc_jprovider;
1675 
1676 		sc->sc_journal_previous_id = sc->sc_journal_id;
1677 
1678 		sc->sc_journal_id = sc->sc_journal_next_id;
1679 		sc->sc_journal_next_id = arc4random();
1680 
1681 		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1682 
1683 		g_journal_write_header(sc);
1684 
1685 		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
1686 		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;
1687 
1688 		sc->sc_active.jj_offset =
1689 		    sc->sc_journal_offset - pp->sectorsize;
1690 		sc->sc_active.jj_queue = NULL;
1691 
1692 		/*
1693 		 * Switch is done, start copying data from the (now) inactive
1694 		 * journal to the data provider.
1695 		 */
1696 		g_journal_copy_start(sc);
1697 	}
1698 	mtx_lock(&sc->sc_mtx);
1699 	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
1700 	mtx_unlock(&sc->sc_mtx);
1701 }
1702 
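/*
 * Initialize a fresh journal: pick random journal IDs, reset all offsets to
 * the start of the journal area and write the first journal header.
 */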
1703 static void
1704 g_journal_initialize(struct g_journal_softc *sc)
1705 {
1706 
1707 	sc->sc_journal_id = arc4random();
1708 	sc->sc_journal_next_id = arc4random();
1709 	sc->sc_journal_previous_id = sc->sc_journal_id;
1710 	sc->sc_journal_offset = sc->sc_jstart;
1711 	sc->sc_inactive.jj_offset = sc->sc_jstart;
1712 	g_journal_write_header(sc);
1713 	sc->sc_active.jj_offset = sc->sc_jstart;
1714 }
1715 
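/*
 * Ask every supported file system type to mark the file system on the data
 * provider as dirty.
 */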
1716 static void
1717 g_journal_mark_as_dirty(struct g_journal_softc *sc)
1718 {
1719 	const struct g_journal_desc *desc;
1720 	int i;
1721 
1722 	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
1723 	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
1724 		desc->jd_dirty(sc->sc_dconsumer);
1725 }
1726 
1727 /*
1728  * The function reads a record header from the given journal.
1729  * It is very similar to g_read_data(9), but it doesn't allocate memory for the
1730  * bio and data on every call.
1731  */
1732 static int
1733 g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
1734     void *data)
1735 {
1736 	int error;
1737 
1738 	g_reset_bio(bp);
1739 	bp->bio_cmd = BIO_READ;
1740 	bp->bio_done = NULL;
1741 	bp->bio_offset = offset;
1742 	bp->bio_length = cp->provider->sectorsize;
1743 	bp->bio_data = data;
1744 	g_io_request(bp, cp);
1745 	error = biowait(bp, "gjs_read");
1746 	return (error);
1747 }
1748 
1749 #if 0
1750 /*
1751  * The function is called when we start the journal device and detect that
1752  * one of the journals was not fully copied.
1753  * The purpose of this function is to read all record headers from the journal
1754  * and place them in the inactive queue, so we can start the journal
1755  * synchronization process and the journal provider itself.
1756  * The design decision was to not synchronize the whole journal here, as it
1757  * can take too much time. Reading only the headers and delaying the sync
1758  * process until after the journal provider is started should be the best choice.
1759  */
1760 #endif
1761 
1762 static void
1763 g_journal_sync(struct g_journal_softc *sc)
1764 {
1765 	struct g_journal_record_header rhdr;
1766 	struct g_journal_entry *ent;
1767 	struct g_journal_header jhdr;
1768 	struct g_consumer *cp;
1769 	struct bio *bp, *fbp, *tbp;
1770 	off_t joffset, offset;
1771 	u_char *buf, sum[16];
1772 	uint64_t id;
1773 	MD5_CTX ctx;
1774 	int error, found, i;
1775 
1776 	found = 0;
1777 	fbp = NULL;
1778 	cp = sc->sc_jconsumer;
1779 	bp = g_alloc_bio();
1780 	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1781 	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1782 
1783 	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1784 
1785 	/*
1786 	 * Read and decode first journal header.
1787 	 */
1788 	error = g_journal_sync_read(cp, bp, offset, buf);
1789 	if (error != 0) {
1790 		GJ_DEBUG(0, "Error while reading journal header from %s.",
1791 		    cp->provider->name);
1792 		goto end;
1793 	}
1794 	error = g_journal_header_decode(buf, &jhdr);
1795 	if (error != 0) {
1796 		GJ_DEBUG(0, "Cannot decode journal header from %s.",
1797 		    cp->provider->name);
1798 		goto end;
1799 	}
1800 	id = sc->sc_journal_id;
1801 	if (jhdr.jh_journal_id != sc->sc_journal_id) {
1802 		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1803 		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1804 		goto end;
1805 	}
1806 	offset += cp->provider->sectorsize;
1807 	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1808 
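	/*
	 * Scan the journal record by record, collecting entries, until we hit
	 * the header of the next journal (the termination) or something we
	 * cannot decode.  Entries from a terminated journal are moved onto
	 * the inactive queue.
	 */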
1809 	for (;;) {
1810 		/*
1811 		 * If the biggest record won't fit, look for a record header or
1812 		 * journal header from the beginning.
1813 		 */
1814 		GJ_VALIDATE_OFFSET(offset, sc);
1815 		error = g_journal_sync_read(cp, bp, offset, buf);
1816 		if (error != 0) {
1817 			/*
1818 			 * Not good. An error while reading a header means
1819 			 * that we cannot read subsequent headers and, as a
1820 			 * consequence, cannot find the termination.
1821 			 */
1822 			GJ_DEBUG(0,
1823 			    "Error while reading record header from %s.",
1824 			    cp->provider->name);
1825 			break;
1826 		}
1827 
1828 		error = g_journal_record_header_decode(buf, &rhdr);
1829 		if (error != 0) {
1830 			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1831 			    (intmax_t)offset, error);
1832 			/*
1833 			 * This is not a record header.
1834 			 * If we are lucky, this is the next journal header.
1835 			 */
1836 			error = g_journal_header_decode(buf, &jhdr);
1837 			if (error != 0) {
1838 				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1839 				    (intmax_t)offset, error);
1840 				/*
1841 				 * Nope, this is not a journal header, which
1842 				 * basically means that the journal is not
1843 				 * terminated properly.
1844 				 */
1845 				error = ENOENT;
1846 				break;
1847 			}
1848 			/*
1849 			 * Ok. This is the header of _some_ journal. We now
1850 			 * need to verify that it is the _next_ journal's header.
1851 			 */
1852 			if (jhdr.jh_journal_id != id) {
1853 				GJ_DEBUG(1, "Journal ID mismatch at %jd "
1854 				    "(0x%08x != 0x%08x).", (intmax_t)offset,
1855 				    (u_int)jhdr.jh_journal_id, (u_int)id);
1856 				error = ENOENT;
1857 				break;
1858 			}
1859 
1860 			/* Found termination. */
1861 			found++;
1862 			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1863 			    (intmax_t)offset, (u_int)id);
1864 			sc->sc_active.jj_offset = offset;
1865 			sc->sc_journal_offset =
1866 			    offset + cp->provider->sectorsize;
1867 			sc->sc_journal_id = id;
1868 			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1869 
1870 			while ((tbp = fbp) != NULL) {
1871 				fbp = tbp->bio_next;
1872 				GJ_LOGREQ(3, tbp, "Adding request.");
1873 				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1874 				    tbp, M_WAITOK);
1875 			}
1876 
1877 			/* Skip journal's header. */
1878 			offset += cp->provider->sectorsize;
1879 			continue;
1880 		}
1881 
1882 		/* Skip record's header. */
1883 		offset += cp->provider->sectorsize;
1884 
1885 		/*
1886 		 * Add information about every record entry to the inactive
1887 		 * queue.
1888 		 */
1889 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1890 			MD5Init(&ctx);
1891 		for (i = 0; i < rhdr.jrh_nentries; i++) {
1892 			ent = &rhdr.jrh_entries[i];
1893 			GJ_DEBUG(3, "Insert entry: %jd %jd.",
1894 			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1895 			g_journal_insert(&fbp, ent->je_offset,
1896 			    ent->je_offset + ent->je_length, ent->je_joffset,
1897 			    NULL, M_WAITOK);
1898 			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1899 				u_char *buf2;
1900 
1901 				/*
1902 				 * TODO: Should use faster function (like
1903 				 *       g_journal_sync_read()).
1904 				 */
1905 				buf2 = g_read_data(cp, offset, ent->je_length,
1906 				    NULL);
1907 				if (buf2 == NULL)
1908 					GJ_DEBUG(0, "Cannot read data at %jd.",
1909 					    (intmax_t)offset);
1910 				else {
1911 					MD5Update(&ctx, buf2, ent->je_length);
1912 					g_free(buf2);
1913 				}
1914 			}
1915 			/* Skip entry's data. */
1916 			offset += ent->je_length;
1917 		}
1918 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1919 			MD5Final(sum, &ctx);
1920 			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1921 				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1922 				    (intmax_t)offset);
1923 			}
1924 		}
1925 	}
1926 end:
1927 	gj_free(bp->bio_data, cp->provider->sectorsize);
1928 	g_destroy_bio(bp);
1929 
1930 	/* Remove bios from unterminated journal. */
1931 	while ((tbp = fbp) != NULL) {
1932 		fbp = tbp->bio_next;
1933 		g_destroy_bio(tbp);
1934 	}
1935 
1936 	if (found < 1 && joffset > 0) {
1937 		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1938 		    sc->sc_name);
1939 		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1940 			sc->sc_inactive.jj_queue = tbp->bio_next;
1941 			g_destroy_bio(tbp);
1942 		}
1943 		g_journal_initialize(sc);
1944 		g_journal_mark_as_dirty(sc);
1945 	} else {
1946 		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1947 		g_journal_copy_start(sc);
1948 	}
1949 }
1950 
1951 /*
1952  * Wait for requests.
1953  * If we have requests in the current queue, flush them 3 seconds after the
1954  * last flush. This way we don't wait forever (or for a journal switch) before
1955  * storing partially filled records in the journal.
1956  */
1957 static void
1958 g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1959 {
1960 	int error, timeout;
1961 
1962 	GJ_DEBUG(3, "%s: enter", __func__);
1963 	if (sc->sc_current_count == 0) {
1964 		if (g_journal_debug < 2)
1965 			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
1966 		else {
1967 			/*
1968 			 * If we have debug turned on, show number of elements
1969 			 * in various queues.
1970 			 */
1971 			for (;;) {
1972 				error = msleep(sc, &sc->sc_mtx, PRIBIO,
1973 				    "gj:work", hz * 3);
1974 				if (error == 0) {
1975 					mtx_unlock(&sc->sc_mtx);
1976 					break;
1977 				}
1978 				GJ_DEBUG(3, "Report: current count=%d",
1979 				    sc->sc_current_count);
1980 				GJ_DEBUG(3, "Report: flush count=%d",
1981 				    sc->sc_flush_count);
1982 				GJ_DEBUG(3, "Report: flush in progress=%d",
1983 				    sc->sc_flush_in_progress);
1984 				GJ_DEBUG(3, "Report: copy in progress=%d",
1985 				    sc->sc_copy_in_progress);
1986 				GJ_DEBUG(3, "Report: delayed=%d",
1987 				    sc->sc_delayed_count);
1988 			}
1989 		}
1990 		GJ_DEBUG(3, "%s: exit 1", __func__);
1991 		return;
1992 	}
1993 
1994 	/*
1995 	 * Flush records every 3 seconds, even if they are not full.
1996 	 */
1997 	timeout = (last_write + 3 - time_second) * hz;
1998 	if (timeout <= 0) {
1999 		mtx_unlock(&sc->sc_mtx);
2000 		g_journal_flush(sc);
2001 		g_journal_flush_send(sc);
2002 		GJ_DEBUG(3, "%s: exit 2", __func__);
2003 		return;
2004 	}
2005 	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2006 	if (error == EWOULDBLOCK)
2007 		g_journal_flush_send(sc);
2008 	GJ_DEBUG(3, "%s: exit 3", __func__);
2009 }
2010 
2011 /*
2012  * Worker thread.
2013  */
2014 static void
2015 g_journal_worker(void *arg)
2016 {
2017 	struct g_journal_softc *sc;
2018 	struct g_geom *gp;
2019 	struct g_provider *pp;
2020 	struct bio *bp;
2021 	time_t last_write;
2022 	int type;
2023 
2024 	thread_lock(curthread);
2025 	sched_prio(curthread, PRIBIO);
2026 	thread_unlock(curthread);
2027 
2028 	sc = arg;
2029 	type = 0;	/* gcc */
2030 
2031 	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2032 		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2033 		g_journal_initialize(sc);
2034 	} else {
2035 		g_journal_sync(sc);
2036 	}
2037 	/*
2038 	 * Check if we can use BIO_FLUSH.
2039 	 */
2040 	sc->sc_bio_flush = 0;
2041 	if (g_io_flush(sc->sc_jconsumer) == 0) {
2042 		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2043 		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2044 		    sc->sc_jconsumer->provider->name);
2045 	} else {
2046 		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2047 		    sc->sc_jconsumer->provider->name);
2048 	}
2049 	if (sc->sc_jconsumer != sc->sc_dconsumer) {
2050 		if (g_io_flush(sc->sc_dconsumer) == 0) {
2051 			sc->sc_bio_flush |= GJ_FLUSH_DATA;
2052 			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2053 			    sc->sc_dconsumer->provider->name);
2054 		} else {
2055 			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2056 			    sc->sc_dconsumer->provider->name);
2057 		}
2058 	}
2059 
2060 	gp = sc->sc_geom;
2061 	g_topology_lock();
2062 	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2063 	pp->mediasize = sc->sc_mediasize;
2064 	/*
2065 	 * There could be a problem when the data and journal providers have
2066 	 * different sector sizes, but such a scenario is prevented at journal
2067 	 * creation time.
2068 	 */
2069 	pp->sectorsize = sc->sc_sectorsize;
2070 	g_error_provider(pp, 0);
2071 	g_topology_unlock();
2072 	last_write = time_second;
2073 
2074 	if (sc->sc_rootmount != NULL) {
2075 		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2076 		root_mount_rel(sc->sc_rootmount);
2077 		sc->sc_rootmount = NULL;
2078 	}
2079 
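	/*
	 * Main loop: take requests from the back queue (journal and copy
	 * traffic) first, then from the regular queue, and dispatch them by
	 * type.  When both queues are empty, handle a pending switch or
	 * destroy request, or wait in g_journal_wait().
	 */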
2080 	for (;;) {
2081 		/* Get first request from the queue. */
2082 		mtx_lock(&sc->sc_mtx);
2083 		bp = bioq_first(&sc->sc_back_queue);
2084 		if (bp != NULL)
2085 			type = (bp->bio_cflags & GJ_BIO_MASK);
2086 		if (bp == NULL) {
2087 			bp = bioq_first(&sc->sc_regular_queue);
2088 			if (bp != NULL)
2089 				type = GJ_BIO_REGULAR;
2090 		}
2091 		if (bp == NULL) {
2092 try_switch:
2093 			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2094 			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2095 				if (sc->sc_current_count > 0) {
2096 					mtx_unlock(&sc->sc_mtx);
2097 					g_journal_flush(sc);
2098 					g_journal_flush_send(sc);
2099 					continue;
2100 				}
2101 				if (sc->sc_flush_in_progress > 0)
2102 					goto sleep;
2103 				if (sc->sc_copy_in_progress > 0)
2104 					goto sleep;
2105 			}
2106 			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2107 				mtx_unlock(&sc->sc_mtx);
2108 				g_journal_switch(sc);
2109 				wakeup(&sc->sc_journal_copying);
2110 				continue;
2111 			}
2112 			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2113 				GJ_DEBUG(1, "Shutting down worker "
2114 				    "thread for %s.", gp->name);
2115 				sc->sc_worker = NULL;
2116 				wakeup(&sc->sc_worker);
2117 				mtx_unlock(&sc->sc_mtx);
2118 				kproc_exit(0);
2119 			}
2120 sleep:
2121 			g_journal_wait(sc, last_write);
2122 			continue;
2123 		}
2124 		/*
2125 		 * If a switch is in progress, we need to delay all new
2126 		 * write requests until it's done.
2127 		 */
2128 		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2129 		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2130 			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2131 			goto try_switch;
2132 		}
2133 		if (type == GJ_BIO_REGULAR)
2134 			bioq_remove(&sc->sc_regular_queue, bp);
2135 		else
2136 			bioq_remove(&sc->sc_back_queue, bp);
2137 		mtx_unlock(&sc->sc_mtx);
2138 		switch (type) {
2139 		case GJ_BIO_REGULAR:
2140 			/* Regular request. */
2141 			switch (bp->bio_cmd) {
2142 			case BIO_READ:
2143 				g_journal_read(sc, bp, bp->bio_offset,
2144 				    bp->bio_offset + bp->bio_length);
2145 				break;
2146 			case BIO_WRITE:
2147 				last_write = time_second;
2148 				g_journal_add_request(sc, bp);
2149 				g_journal_flush_send(sc);
2150 				break;
2151 			default:
2152 				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2153 			}
2154 			break;
2155 		case GJ_BIO_COPY:
2156 			switch (bp->bio_cmd) {
2157 			case BIO_READ:
2158 				if (g_journal_copy_read_done(bp))
2159 					g_journal_copy_send(sc);
2160 				break;
2161 			case BIO_WRITE:
2162 				g_journal_copy_write_done(bp);
2163 				g_journal_copy_send(sc);
2164 				break;
2165 			default:
2166 				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2167 			}
2168 			break;
2169 		case GJ_BIO_JOURNAL:
2170 			g_journal_flush_done(bp);
2171 			g_journal_flush_send(sc);
2172 			break;
2173 		case GJ_BIO_READ:
2174 		default:
2175 			panic("Invalid bio (%d).", type);
2176 		}
2177 	}
2178 }
2179 
2180 static void
2181 g_journal_destroy_event(void *arg, int flags __unused)
2182 {
2183 	struct g_journal_softc *sc;
2184 
2185 	g_topology_assert();
2186 	sc = arg;
2187 	g_journal_destroy(sc);
2188 }
2189 
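/*
 * Called when the missing half of a partially configured journal device does
 * not show up in time; schedule destruction of the incomplete device.
 */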
2190 static void
2191 g_journal_timeout(void *arg)
2192 {
2193 	struct g_journal_softc *sc;
2194 
2195 	sc = arg;
2196 	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2197 	    sc->sc_geom->name);
2198 	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2199 }
2200 
2201 static struct g_geom *
2202 g_journal_create(struct g_class *mp, struct g_provider *pp,
2203     const struct g_journal_metadata *md)
2204 {
2205 	struct g_journal_softc *sc;
2206 	struct g_geom *gp;
2207 	struct g_consumer *cp;
2208 	int error;
2209 
2210 	sc = NULL;	/* gcc */
2211 
2212 	g_topology_assert();
2213 	/*
2214 	 * There are two possibilities:
2215 	 * 1. Data and both journals are on the same provider.
2216 	 * 2. Data and journals are all on separate providers.
2217 	 */
2218 	/* Look for journal device with the same ID. */
2219 	LIST_FOREACH(gp, &mp->geom, geom) {
2220 		sc = gp->softc;
2221 		if (sc == NULL)
2222 			continue;
2223 		if (sc->sc_id == md->md_id)
2224 			break;
2225 	}
2226 	if (gp == NULL)
2227 		sc = NULL;
2228 	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2229 		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2230 		return (NULL);
2231 	}
2232 	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2233 		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2234 		return (NULL);
2235 	}
2236 	if (md->md_type & GJ_TYPE_DATA) {
2237 		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2238 		    pp->name);
2239 	}
2240 	if (md->md_type & GJ_TYPE_JOURNAL) {
2241 		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2242 		    pp->name);
2243 	}
2244 
2245 	if (sc == NULL) {
2246 		/* Action geom. */
2247 		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2248 		sc->sc_id = md->md_id;
2249 		sc->sc_type = 0;
2250 		sc->sc_flags = 0;
2251 		sc->sc_worker = NULL;
2252 
2253 		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2254 		gp->start = g_journal_start;
2255 		gp->orphan = g_journal_orphan;
2256 		gp->access = g_journal_access;
2257 		gp->softc = sc;
2258 		gp->flags |= G_GEOM_VOLATILE_BIO;
2259 		sc->sc_geom = gp;
2260 
2261 		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2262 
2263 		bioq_init(&sc->sc_back_queue);
2264 		bioq_init(&sc->sc_regular_queue);
2265 		bioq_init(&sc->sc_delayed_queue);
2266 		sc->sc_delayed_count = 0;
2267 		sc->sc_current_queue = NULL;
2268 		sc->sc_current_count = 0;
2269 		sc->sc_flush_queue = NULL;
2270 		sc->sc_flush_count = 0;
2271 		sc->sc_flush_in_progress = 0;
2272 		sc->sc_copy_queue = NULL;
2273 		sc->sc_copy_in_progress = 0;
2274 		sc->sc_inactive.jj_queue = NULL;
2275 		sc->sc_active.jj_queue = NULL;
2276 
2277 		sc->sc_rootmount = root_mount_hold("GJOURNAL");
2278 		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2279 
2280 		callout_init(&sc->sc_callout, 1);
2281 		if (md->md_type != GJ_TYPE_COMPLETE) {
2282 			/*
2283 			 * Journal and data are on separate providers.
2284 			 * At this point we have only one of them.
2285 			 * We set up a timeout in case the other part does not
2286 			 * appear, so we won't wait forever.
2287 			 */
2288 			callout_reset(&sc->sc_callout, 5 * hz,
2289 			    g_journal_timeout, sc);
2290 		}
2291 	}
2292 
2293 	/* Remember type of the data provider. */
2294 	if (md->md_type & GJ_TYPE_DATA)
2295 		sc->sc_orig_type = md->md_type;
2296 	sc->sc_type |= md->md_type;
2297 	cp = NULL;
2298 
2299 	if (md->md_type & GJ_TYPE_DATA) {
2300 		if (md->md_flags & GJ_FLAG_CLEAN)
2301 			sc->sc_flags |= GJF_DEVICE_CLEAN;
2302 		if (md->md_flags & GJ_FLAG_CHECKSUM)
2303 			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2304 		cp = g_new_consumer(gp);
2305 		error = g_attach(cp, pp);
2306 		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2307 		    pp->name, error));
2308 		error = g_access(cp, 1, 1, 1);
2309 		if (error != 0) {
2310 			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2311 			    error);
2312 			g_journal_destroy(sc);
2313 			return (NULL);
2314 		}
2315 		sc->sc_dconsumer = cp;
2316 		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2317 		sc->sc_sectorsize = pp->sectorsize;
2318 		sc->sc_jstart = md->md_jstart;
2319 		sc->sc_jend = md->md_jend;
2320 		if (md->md_provider[0] != '\0')
2321 			sc->sc_flags |= GJF_DEVICE_HARDCODED;
2322 		sc->sc_journal_offset = md->md_joffset;
2323 		sc->sc_journal_id = md->md_jid;
2324 		sc->sc_journal_previous_id = md->md_jid;
2325 	}
2326 	if (md->md_type & GJ_TYPE_JOURNAL) {
2327 		if (cp == NULL) {
2328 			cp = g_new_consumer(gp);
2329 			error = g_attach(cp, pp);
2330 			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2331 			    pp->name, error));
2332 			error = g_access(cp, 1, 1, 1);
2333 			if (error != 0) {
2334 				GJ_DEBUG(0, "Cannot access %s (error=%d).",
2335 				    pp->name, error);
2336 				g_journal_destroy(sc);
2337 				return (NULL);
2338 			}
2339 		} else {
2340 			/*
2341 			 * The journal is on the same provider as the data, so
2342 			 * the data provider ends where the journal starts.
2343 			 */
2344 			sc->sc_mediasize = md->md_jstart;
2345 		}
2346 		sc->sc_jconsumer = cp;
2347 	}
2348 
2349 	/* Start switcher kproc if needed. */
2350 	if (g_journal_switcher_proc == NULL)
2351 		g_journal_start_switcher(mp);
2352 
2353 	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2354 		/* Journal is not complete yet. */
2355 		return (gp);
2356 	} else {
2357 		/* Journal complete, cancel timeout. */
2358 		callout_drain(&sc->sc_callout);
2359 	}
2360 
2361 	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2362 	    "g_journal %s", sc->sc_name);
2363 	if (error != 0) {
2364 		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2365 		    sc->sc_name);
2366 		g_journal_destroy(sc);
2367 		return (NULL);
2368 	}
2369 
2370 	return (gp);
2371 }
2372 
2373 static void
2374 g_journal_destroy_consumer(void *arg, int flags __unused)
2375 {
2376 	struct g_consumer *cp;
2377 
2378 	g_topology_assert();
2379 	cp = arg;
2380 	g_detach(cp);
2381 	g_destroy_consumer(cp);
2382 }
2383 
2384 static int
2385 g_journal_destroy(struct g_journal_softc *sc)
2386 {
2387 	struct g_geom *gp;
2388 	struct g_provider *pp;
2389 	struct g_consumer *cp;
2390 
2391 	g_topology_assert();
2392 
2393 	if (sc == NULL)
2394 		return (ENXIO);
2395 
2396 	gp = sc->sc_geom;
2397 	pp = LIST_FIRST(&gp->provider);
2398 	if (pp != NULL) {
2399 		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2400 			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2401 			    pp->name, pp->acr, pp->acw, pp->ace);
2402 			return (EBUSY);
2403 		}
2404 		g_error_provider(pp, ENXIO);
2405 
2406 		g_journal_flush(sc);
2407 		g_journal_flush_send(sc);
2408 		g_journal_switch(sc);
2409 	}
2410 
2411 	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2412 
2413 	g_topology_unlock();
2414 
2415 	if (sc->sc_rootmount != NULL) {
2416 		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2417 		root_mount_rel(sc->sc_rootmount);
2418 		sc->sc_rootmount = NULL;
2419 	}
2420 
2421 	callout_drain(&sc->sc_callout);
2422 	mtx_lock(&sc->sc_mtx);
2423 	wakeup(sc);
2424 	while (sc->sc_worker != NULL)
2425 		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2426 	mtx_unlock(&sc->sc_mtx);
2427 
2428 	if (pp != NULL) {
2429 		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2430 		g_journal_metadata_update(sc);
2431 		g_topology_lock();
2432 		g_wither_provider(pp, ENXIO);
2433 	} else {
2434 		g_topology_lock();
2435 	}
2436 	mtx_destroy(&sc->sc_mtx);
2437 
2438 	if (sc->sc_current_count != 0) {
2439 		GJ_DEBUG(0, "Warning! Number of current requests %d.",
2440 		    sc->sc_current_count);
2441 	}
2442 
2443 	gp->softc = NULL;
2444 	LIST_FOREACH(cp, &gp->consumer, consumer) {
2445 		if (cp->acr + cp->acw + cp->ace > 0)
2446 			g_access(cp, -1, -1, -1);
2447 		/*
2448 		 * We keep all consumers open for writing, so if we detach
2449 		 * and destroy the consumer here, the providers will be offered
2450 		 * for tasting again and the journal will be restarted.
2451 		 * Sending an event here prevents this from happening.
2452 		 */
2453 		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2454 	}
2455 	g_wither_geom(gp, ENXIO);
2456 	free(sc, M_JOURNAL);
2457 	return (0);
2458 }
2459 
2460 static void
2461 g_journal_taste_orphan(struct g_consumer *cp)
2462 {
2463 
2464 	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2465 	    cp->provider->name));
2466 }
2467 
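/*
 * Taste the given provider: read gjournal metadata from it and, if the
 * metadata is valid and matches the provider, create (or complete) the
 * corresponding journal device.
 */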
2468 static struct g_geom *
2469 g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2470 {
2471 	struct g_journal_metadata md;
2472 	struct g_consumer *cp;
2473 	struct g_geom *gp;
2474 	int error;
2475 
2476 	g_topology_assert();
2477 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2478 	GJ_DEBUG(2, "Tasting %s.", pp->name);
2479 	if (pp->geom->class == mp)
2480 		return (NULL);
2481 
2482 	gp = g_new_geomf(mp, "journal:taste");
2483 	/* This orphan function should never be called. */
2484 	gp->orphan = g_journal_taste_orphan;
2485 	cp = g_new_consumer(gp);
2486 	g_attach(cp, pp);
2487 	error = g_journal_metadata_read(cp, &md);
2488 	g_detach(cp);
2489 	g_destroy_consumer(cp);
2490 	g_destroy_geom(gp);
2491 	if (error != 0)
2492 		return (NULL);
2493 	gp = NULL;
2494 
2495 	if (md.md_provider[0] != '\0' &&
2496 	    !g_compare_names(md.md_provider, pp->name))
2497 		return (NULL);
2498 	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2499 		return (NULL);
2500 	if (g_journal_debug >= 2)
2501 		journal_metadata_dump(&md);
2502 
2503 	gp = g_journal_create(mp, pp, &md);
2504 	return (gp);
2505 }
2506 
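/*
 * Look up a fully configured journal device (skipping ones being destroyed)
 * by its name or by the name of the provider it exposes.
 */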
2507 static struct g_journal_softc *
2508 g_journal_find_device(struct g_class *mp, const char *name)
2509 {
2510 	struct g_journal_softc *sc;
2511 	struct g_geom *gp;
2512 	struct g_provider *pp;
2513 
2514 	if (strncmp(name, _PATH_DEV, 5) == 0)
2515 		name += 5;
2516 	LIST_FOREACH(gp, &mp->geom, geom) {
2517 		sc = gp->softc;
2518 		if (sc == NULL)
2519 			continue;
2520 		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2521 			continue;
2522 		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2523 			continue;
2524 		pp = LIST_FIRST(&gp->provider);
2525 		if (strcmp(sc->sc_name, name) == 0)
2526 			return (sc);
2527 		if (pp != NULL && strcmp(pp->name, name) == 0)
2528 			return (sc);
2529 	}
2530 	return (NULL);
2531 }
2532 
2533 static void
2534 g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2535 {
2536 	struct g_journal_softc *sc;
2537 	const char *name;
2538 	char param[16];
2539 	int *nargs;
2540 	int error, i;
2541 
2542 	g_topology_assert();
2543 
2544 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2545 	if (nargs == NULL) {
2546 		gctl_error(req, "No '%s' argument.", "nargs");
2547 		return;
2548 	}
2549 	if (*nargs <= 0) {
2550 		gctl_error(req, "Missing device(s).");
2551 		return;
2552 	}
2553 
2554 	for (i = 0; i < *nargs; i++) {
2555 		snprintf(param, sizeof(param), "arg%d", i);
2556 		name = gctl_get_asciiparam(req, param);
2557 		if (name == NULL) {
2558 			gctl_error(req, "No 'arg%d' argument.", i);
2559 			return;
2560 		}
2561 		sc = g_journal_find_device(mp, name);
2562 		if (sc == NULL) {
2563 			gctl_error(req, "No such device: %s.", name);
2564 			return;
2565 		}
2566 		error = g_journal_destroy(sc);
2567 		if (error != 0) {
2568 			gctl_error(req, "Cannot destroy device %s (error=%d).",
2569 			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
2570 			return;
2571 		}
2572 	}
2573 }
2574 
2575 static void
2576 g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2577 {
2578 
2579 	g_topology_assert();
2580 	g_topology_unlock();
2581 	g_journal_sync_requested++;
2582 	wakeup(&g_journal_switcher_state);
2583 	while (g_journal_sync_requested > 0)
2584 		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2585 	g_topology_lock();
2586 }
2587 
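/*
 * Handle gjournal(8) control requests after verifying that the userland and
 * kernel versions match.
 */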
2588 static void
2589 g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2590 {
2591 	uint32_t *version;
2592 
2593 	g_topology_assert();
2594 
2595 	version = gctl_get_paraml(req, "version", sizeof(*version));
2596 	if (version == NULL) {
2597 		gctl_error(req, "No '%s' argument.", "version");
2598 		return;
2599 	}
2600 	if (*version != G_JOURNAL_VERSION) {
2601 		gctl_error(req, "Userland and kernel parts are out of sync.");
2602 		return;
2603 	}
2604 
2605 	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2606 		g_journal_ctl_destroy(req, mp);
2607 		return;
2608 	} else if (strcmp(verb, "sync") == 0) {
2609 		g_journal_ctl_sync(req, mp);
2610 		return;
2611 	}
2612 
2613 	gctl_error(req, "Unknown verb.");
2614 }
2615 
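/*
 * Dump gjournal-specific state (consumer role, journal boundaries, device ID)
 * into the XML configuration output.
 */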
2616 static void
2617 g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2618     struct g_consumer *cp, struct g_provider *pp)
2619 {
2620 	struct g_journal_softc *sc;
2621 
2622 	g_topology_assert();
2623 
2624 	sc = gp->softc;
2625 	if (sc == NULL)
2626 		return;
2627 	if (pp != NULL) {
2628 		/* Nothing here. */
2629 	} else if (cp != NULL) {
2630 		int first = 1;
2631 
2632 		sbuf_printf(sb, "%s<Role>", indent);
2633 		if (cp == sc->sc_dconsumer) {
2634 			sbuf_cat(sb, "Data");
2635 			first = 0;
2636 		}
2637 		if (cp == sc->sc_jconsumer) {
2638 			if (!first)
2639 				sbuf_cat(sb, ",");
2640 			sbuf_cat(sb, "Journal");
2641 		}
2642 		sbuf_cat(sb, "</Role>\n");
2643 		if (cp == sc->sc_jconsumer) {
2644 			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
2645 			    (intmax_t)sc->sc_jstart);
2646 			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
2647 			    (intmax_t)sc->sc_jend);
2648 		}
2649 	} else {
2650 		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2651 	}
2652 }
2653 
2654 static eventhandler_tag g_journal_event_shutdown = NULL;
2655 static eventhandler_tag g_journal_event_lowmem = NULL;
2656 
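/*
 * Shutdown event handler: cleanly destroy all journal devices before the
 * system goes down, unless the kernel has panicked.
 */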
2657 static void
2658 g_journal_shutdown(void *arg, int howto __unused)
2659 {
2660 	struct g_class *mp;
2661 	struct g_geom *gp, *gp2;
2662 
2663 	if (KERNEL_PANICKED())
2664 		return;
2665 	mp = arg;
2666 	g_topology_lock();
2667 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2668 		if (gp->softc == NULL)
2669 			continue;
2670 		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2671 		g_journal_destroy(gp->softc);
2672 	}
2673 	g_topology_unlock();
2674 }
2675 
2676 /*
2677  * Free cached requests from the inactive queue in case of low memory.
2678  * We free GJ_FREE_AT_ONCE elements at once.
2679  */
2680 #define	GJ_FREE_AT_ONCE	4
2681 static void
2682 g_journal_lowmem(void *arg, int howto __unused)
2683 {
2684 	struct g_journal_softc *sc;
2685 	struct g_class *mp;
2686 	struct g_geom *gp;
2687 	struct bio *bp;
2688 	u_int nfree = GJ_FREE_AT_ONCE;
2689 
2690 	g_journal_stats_low_mem++;
2691 	mp = arg;
2692 	g_topology_lock();
2693 	LIST_FOREACH(gp, &mp->geom, geom) {
2694 		sc = gp->softc;
2695 		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2696 			continue;
2697 		mtx_lock(&sc->sc_mtx);
2698 		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2699 		    nfree--, bp = bp->bio_next) {
2700 			/*
2701 			 * It is safe to free bio_data here, because:
2702 			 * 1. If bio_data is NULL it will be read from the
2703 			 *    inactive journal.
2704 			 * 2. If bp is sent down, it is first removed from the
2705 			 *    inactive queue, so it's impossible to free the
2706 			 *    data from under an in-flight bio.
2707 			 * On the other hand, freeing elements from the active
2708 			 * queue is not safe.
2709 			 */
2710 			if (bp->bio_data != NULL) {
2711 				GJ_DEBUG(2, "Freeing data from %s.",
2712 				    sc->sc_name);
2713 				gj_free(bp->bio_data, bp->bio_length);
2714 				bp->bio_data = NULL;
2715 			}
2716 		}
2717 		mtx_unlock(&sc->sc_mtx);
2718 		if (nfree == 0)
2719 			break;
2720 	}
2721 	g_topology_unlock();
2722 }
2723 
2724 static void g_journal_switcher(void *arg);
2725 
2726 static void
2727 g_journal_init(struct g_class *mp)
2728 {
2729 
2730 	/* Pick a conservative value if provided value sucks. */
2731 	if (g_journal_cache_divisor <= 0 ||
2732 	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
2733 		g_journal_cache_divisor = 5;
2734 	}
2735 	if (g_journal_cache_limit > 0) {
2736 		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2737 		g_journal_cache_low =
2738 		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
2739 	}
2740 	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2741 	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2742 	if (g_journal_event_shutdown == NULL)
2743 		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2744 	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2745 	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2746 	if (g_journal_event_lowmem == NULL)
2747 		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2748 }
2749 
2750 static void
2751 g_journal_fini(struct g_class *mp)
2752 {
2753 
2754 	if (g_journal_event_shutdown != NULL) {
2755 		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2756 		    g_journal_event_shutdown);
2757 	}
2758 	if (g_journal_event_lowmem != NULL)
2759 		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2760 	if (g_journal_switcher_proc != NULL)
2761 		g_journal_stop_switcher();
2762 }
2763 
2764 DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2765 
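/*
 * Find the file system descriptor for the given fstype name, or return NULL
 * if the file system is not supported by gjournal.
 */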
2766 static const struct g_journal_desc *
2767 g_journal_find_desc(const char *fstype)
2768 {
2769 	const struct g_journal_desc *desc;
2770 	int i;
2771 
2772 	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2773 	     desc = g_journal_filesystems[++i]) {
2774 		if (strcmp(desc->jd_fstype, fstype) == 0)
2775 			break;
2776 	}
2777 	return (desc);
2778 }
2779 
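/*
 * Request a journal switch on the given device and wait, with sc_mtx held,
 * until the worker thread has completed it.
 */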
2780 static void
2781 g_journal_switch_wait(struct g_journal_softc *sc)
2782 {
2783 	struct bintime bt;
2784 
2785 	mtx_assert(&sc->sc_mtx, MA_OWNED);
2786 	if (g_journal_debug >= 2) {
2787 		if (sc->sc_flush_in_progress > 0) {
2788 			GJ_DEBUG(2, "%d requests flushing.",
2789 			    sc->sc_flush_in_progress);
2790 		}
2791 		if (sc->sc_copy_in_progress > 0) {
2792 			GJ_DEBUG(2, "%d requests copying.",
2793 			    sc->sc_copy_in_progress);
2794 		}
2795 		if (sc->sc_flush_count > 0) {
2796 			GJ_DEBUG(2, "%d requests to flush.",
2797 			    sc->sc_flush_count);
2798 		}
2799 		if (sc->sc_delayed_count > 0) {
2800 			GJ_DEBUG(2, "%d requests delayed.",
2801 			    sc->sc_delayed_count);
2802 		}
2803 	}
2804 	g_journal_stats_switches++;
2805 	if (sc->sc_copy_in_progress > 0)
2806 		g_journal_stats_wait_for_copy++;
2807 	GJ_TIMER_START(1, &bt);
2808 	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2809 	sc->sc_flags |= GJF_DEVICE_SWITCH;
2810 	wakeup(sc);
2811 	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2812 		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2813 		    "gj:switch", 0);
2814 	}
2815 	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2816 }
2817 
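/*
 * Perform a journal switch on every complete device: sync and suspend each
 * gjournal-backed file system, ask it to mark itself clean, switch the
 * journals and resume writes.  Devices not backing a mounted file system are
 * switched at the end.
 */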
2818 static void
2819 g_journal_do_switch(struct g_class *classp)
2820 {
2821 	struct g_journal_softc *sc;
2822 	const struct g_journal_desc *desc;
2823 	struct g_geom *gp;
2824 	struct mount *mp;
2825 	struct bintime bt;
2826 	char *mountpoint;
2827 	int error, save;
2828 
2829 	g_topology_lock();
2830 	LIST_FOREACH(gp, &classp->geom, geom) {
2831 		sc = gp->softc;
2832 		if (sc == NULL)
2833 			continue;
2834 		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2835 			continue;
2836 		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2837 			continue;
2838 		mtx_lock(&sc->sc_mtx);
2839 		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2840 		mtx_unlock(&sc->sc_mtx);
2841 	}
2842 	g_topology_unlock();
2843 
2844 	mtx_lock(&mountlist_mtx);
2845 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2846 		if (mp->mnt_gjprovider == NULL)
2847 			continue;
2848 		if (mp->mnt_flag & MNT_RDONLY)
2849 			continue;
2850 		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2851 		if (desc == NULL)
2852 			continue;
2853 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
2854 			continue;
2855 		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2856 
2857 		g_topology_lock();
2858 		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2859 		g_topology_unlock();
2860 
2861 		if (sc == NULL) {
2862 			GJ_DEBUG(0, "Cannot find journal geom for %s.",
2863 			    mp->mnt_gjprovider);
2864 			goto next;
2865 		} else if (JEMPTY(sc)) {
2866 			mtx_lock(&sc->sc_mtx);
2867 			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2868 			mtx_unlock(&sc->sc_mtx);
2869 			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2870 			goto next;
2871 		}
2872 
2873 		mountpoint = mp->mnt_stat.f_mntonname;
2874 
2875 		error = vn_start_write(NULL, &mp, V_WAIT);
2876 		if (error != 0) {
2877 			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
2878 			    mountpoint, error);
2879 			goto next;
2880 		}
2881 
2882 		save = curthread_pflags_set(TDP_SYNCIO);
2883 
2884 		GJ_TIMER_START(1, &bt);
2885 		vfs_periodic(mp, MNT_NOWAIT);
2886 		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);
2887 
2888 		GJ_TIMER_START(1, &bt);
2889 		error = VFS_SYNC(mp, MNT_NOWAIT);
2890 		if (error == 0)
2891 			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
2892 		else {
2893 			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
2894 			    mountpoint, error);
2895 		}
2896 
2897 		curthread_pflags_restore(save);
2898 
2899 		vn_finished_write(mp);
2900 
2901 		if (error != 0)
2902 			goto next;
2903 
2904 		/*
2905 		 * Send BIO_FLUSH before freezing the file system, so it can be
2906 		 * faster after the freeze.
2907 		 */
2908 		GJ_TIMER_START(1, &bt);
2909 		g_journal_flush_cache(sc);
2910 		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);
2911 
2912 		GJ_TIMER_START(1, &bt);
2913 		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
2914 		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
2915 		if (error != 0) {
2916 			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
2917 			    mountpoint, error);
2918 			goto next;
2919 		}
2920 
2921 		error = desc->jd_clean(mp);
2922 		if (error != 0)
2923 			goto next;
2924 
2925 		mtx_lock(&sc->sc_mtx);
2926 		g_journal_switch_wait(sc);
2927 		mtx_unlock(&sc->sc_mtx);
2928 
2929 		vfs_write_resume(mp, 0);
2930 next:
2931 		mtx_lock(&mountlist_mtx);
2932 		vfs_unbusy(mp);
2933 	}
2934 	mtx_unlock(&mountlist_mtx);
2935 
2936 	sc = NULL;
2937 	for (;;) {
2938 		g_topology_lock();
2939 		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
2940 			sc = gp->softc;
2941 			if (sc == NULL)
2942 				continue;
2943 			mtx_lock(&sc->sc_mtx);
2944 			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
2945 			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
2946 			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
2947 				break;
2948 			}
2949 			mtx_unlock(&sc->sc_mtx);
2950 			sc = NULL;
2951 		}
2952 		g_topology_unlock();
2953 		if (sc == NULL)
2954 			break;
2955 		mtx_assert(&sc->sc_mtx, MA_OWNED);
2956 		g_journal_switch_wait(sc);
2957 		mtx_unlock(&sc->sc_mtx);
2958 	}
2959 }
2960 
2961 static void
2962 g_journal_start_switcher(struct g_class *mp)
2963 {
2964 	int error;
2965 
2966 	g_topology_assert();
2967 	MPASS(g_journal_switcher_proc == NULL);
2968 	g_journal_switcher_state = GJ_SWITCHER_WORKING;
2969 	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
2970 	    0, 0, "g_journal switcher");
2971 	KASSERT(error == 0, ("Cannot create switcher thread."));
2972 }
2973 
2974 static void
2975 g_journal_stop_switcher(void)
2976 {
2977 	g_topology_assert();
2978 	MPASS(g_journal_switcher_proc != NULL);
2979 	g_journal_switcher_state = GJ_SWITCHER_DIE;
2980 	wakeup(&g_journal_switcher_state);
2981 	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2982 		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2983 	GJ_DEBUG(1, "Switcher died.");
2984 	g_journal_switcher_proc = NULL;
2985 }
2986 
2987 /*
2988  * TODO: Kill switcher thread on last geom destruction?
2989  */
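/*
 * The switcher wakes up every g_journal_switch_time seconds, or earlier when
 * woken up explicitly (e.g. by a sync request or by cache pressure), and
 * triggers a journal switch on all devices via g_journal_do_switch().
 */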
2990 static void
2991 g_journal_switcher(void *arg)
2992 {
2993 	struct g_class *mp;
2994 	struct bintime bt;
2995 	int error;
2996 
2997 	mp = arg;
2998 	curthread->td_pflags |= TDP_NORUNNINGBUF;
2999 	for (;;) {
3000 		g_journal_switcher_wokenup = 0;
3001 		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
3002 		    g_journal_switch_time * hz);
3003 		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
3004 			g_journal_switcher_state = GJ_SWITCHER_DIED;
3005 			GJ_DEBUG(1, "Switcher exiting.");
3006 			wakeup(&g_journal_switcher_state);
3007 			kproc_exit(0);
3008 		}
3009 		if (error == 0 && g_journal_sync_requested == 0) {
3010 			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
3011 			    "limit=%jd).", (intmax_t)g_journal_cache_used,
3012 			    (intmax_t)g_journal_cache_limit);
3013 		}
3014 		GJ_TIMER_START(1, &bt);
3015 		g_journal_do_switch(mp);
3016 		GJ_TIMER_STOP(1, &bt, "Entire switch time");
3017 		if (g_journal_sync_requested > 0) {
3018 			g_journal_sync_requested = 0;
3019 			wakeup(&g_journal_sync_requested);
3020 		}
3021 	}
3022 }
3023