xref: /freebsd/sys/geom/journal/g_journal.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/mount.h>
41 #include <sys/eventhandler.h>
42 #include <sys/proc.h>
43 #include <sys/kthread.h>
44 #include <sys/sched.h>
45 #include <sys/taskqueue.h>
46 #include <sys/vnode.h>
47 #include <sys/sbuf.h>
48 #ifdef GJ_MEMDEBUG
49 #include <sys/stack.h>
50 #include <sys/kdb.h>
51 #endif
52 #include <vm/vm.h>
53 #include <vm/vm_kern.h>
54 #include <geom/geom.h>
55 #include <geom/geom_dbg.h>
56 
57 #include <geom/journal/g_journal.h>
58 
59 FEATURE(geom_journal, "GEOM journaling support");
60 
61 /*
62  * On-disk journal format:
63  *
64  * JH - Journal header
65  * RH - Record header
66  *
67  * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
68  * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
69  * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
70  *
71  */
72 
73 CTASSERT(sizeof(struct g_journal_header) <= 512);
74 CTASSERT(sizeof(struct g_journal_record_header) <= 512);
75 
76 static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
77 static struct mtx g_journal_cache_mtx;
78 MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
79 
80 const struct g_journal_desc *g_journal_filesystems[] = {
81 	&g_journal_ufs,
82 	NULL
83 };
84 
85 SYSCTL_DECL(_kern_geom);
86 
87 int g_journal_debug = 0;
88 static u_int g_journal_switch_time = 10;
89 static u_int g_journal_force_switch = 70;
90 static u_int g_journal_parallel_flushes = 16;
91 static u_int g_journal_parallel_copies = 16;
92 static u_int g_journal_accept_immediately = 64;
93 static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
94 static u_int g_journal_do_optimize = 1;
95 
96 static SYSCTL_NODE(_kern_geom, OID_AUTO, journal,
97     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
98     "GEOM_JOURNAL stuff");
99 SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RWTUN, &g_journal_debug, 0,
100     "Debug level");
101 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
102     &g_journal_switch_time, 0, "Switch journals every N seconds");
103 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
104     &g_journal_force_switch, 0, "Force switch when journal is N% full");
105 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
106     &g_journal_parallel_flushes, 0,
107     "Number of flush I/O requests to send in parallel");
108 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
109     &g_journal_accept_immediately, 0,
110     "Number of I/O requests accepted immediately");
111 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
112     &g_journal_parallel_copies, 0,
113     "Number of copy I/O requests to send in parallel");
114 static int
115 g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
116 {
117 	u_int entries;
118 	int error;
119 
120 	entries = g_journal_record_entries;
121 	error = sysctl_handle_int(oidp, &entries, 0, req);
122 	if (error != 0 || req->newptr == NULL)
123 		return (error);
124 	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
125 		return (EINVAL);
126 	g_journal_record_entries = entries;
127 	return (0);
128 }
129 SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
130     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
131     g_journal_record_entries_sysctl, "I",
132     "Maximum number of entries in one journal record");
133 SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
134     &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
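
/*
 * Illustrative examples (annotation) of how these knobs can be tuned from
 * userland; the values are arbitrary, not recommendations:
 *   sysctl kern.geom.journal.switch_time=5
 *   sysctl kern.geom.journal.force_switch=80
 * "debug" is CTLFLAG_RWTUN, so it can also be preset as the loader tunable
 * kern.geom.journal.debug in /boot/loader.conf.
 */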
135 
136 static u_long g_journal_cache_used = 0;
137 static u_long g_journal_cache_limit = 64 * 1024 * 1024;
138 static u_int g_journal_cache_divisor = 2;
139 static u_int g_journal_cache_switch = 90;
140 static u_int g_journal_cache_misses = 0;
141 static u_int g_journal_cache_alloc_failures = 0;
142 static u_long g_journal_cache_low = 0;
143 
144 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache,
145     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
146     "GEOM_JOURNAL cache");
147 SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
148     &g_journal_cache_used, 0, "Number of allocated bytes");
149 static int
150 g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
151 {
152 	u_long limit;
153 	int error;
154 
155 	limit = g_journal_cache_limit;
156 	error = sysctl_handle_long(oidp, &limit, 0, req);
157 	if (error != 0 || req->newptr == NULL)
158 		return (error);
159 	g_journal_cache_limit = limit;
160 	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
161 	return (0);
162 }
163 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
164     CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0,
165     g_journal_cache_limit_sysctl, "I",
166     "Maximum number of allocated bytes");
167 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
168     &g_journal_cache_divisor, 0,
169     "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
170 static int
171 g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
172 {
173 	u_int cswitch;
174 	int error;
175 
176 	cswitch = g_journal_cache_switch;
177 	error = sysctl_handle_int(oidp, &cswitch, 0, req);
178 	if (error != 0 || req->newptr == NULL)
179 		return (error);
180 	if (cswitch > 100)
181 		return (EINVAL);
182 	g_journal_cache_switch = cswitch;
183 	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
184 	return (0);
185 }
186 SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
187     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
188     g_journal_cache_switch_sysctl, "I",
189     "Force switch when we hit this percent of cache use");
190 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
191     &g_journal_cache_misses, 0, "Number of cache misses");
192 SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
193     &g_journal_cache_alloc_failures, 0, "Memory allocation failures");
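
/*
 * Note (annotation): both handlers above recompute g_journal_cache_low as
 * (limit / 100) * switch.  With the defaults (limit = 64 MB, switch = 90%)
 * that is (67108864 / 100) * 90 = 60397920 bytes, roughly 57.6 MB, which is
 * the point at which gj_malloc() below starts waking up the switcher.  The
 * initial computation happens outside this excerpt.
 */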
194 
195 static u_long g_journal_stats_bytes_skipped = 0;
196 static u_long g_journal_stats_combined_ios = 0;
197 static u_long g_journal_stats_switches = 0;
198 static u_long g_journal_stats_wait_for_copy = 0;
199 static u_long g_journal_stats_journal_full = 0;
200 static u_long g_journal_stats_low_mem = 0;
201 
202 static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats,
203     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
204     "GEOM_JOURNAL statistics");
205 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
206     &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
207 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
208     &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
209 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
210     &g_journal_stats_switches, 0, "Number of journal switches");
211 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
212     &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
213 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
214     &g_journal_stats_journal_full, 0,
215     "Number of times journal was almost full.");
216 SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
217     &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
218 
219 static g_taste_t g_journal_taste;
220 static g_ctl_req_t g_journal_config;
221 static g_dumpconf_t g_journal_dumpconf;
222 static g_init_t g_journal_init;
223 static g_fini_t g_journal_fini;
224 
225 struct g_class g_journal_class = {
226 	.name = G_JOURNAL_CLASS_NAME,
227 	.version = G_VERSION,
228 	.taste = g_journal_taste,
229 	.ctlreq = g_journal_config,
230 	.dumpconf = g_journal_dumpconf,
231 	.init = g_journal_init,
232 	.fini = g_journal_fini
233 };
234 
235 static int g_journal_destroy(struct g_journal_softc *sc);
236 static void g_journal_metadata_update(struct g_journal_softc *sc);
237 static void g_journal_start_switcher(struct g_class *mp);
238 static void g_journal_stop_switcher(void);
239 static void g_journal_switch_wait(struct g_journal_softc *sc);
240 
241 #define	GJ_SWITCHER_WORKING	0
242 #define	GJ_SWITCHER_DIE		1
243 #define	GJ_SWITCHER_DIED	2
244 static struct proc *g_journal_switcher_proc = NULL;
245 static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
246 static int g_journal_switcher_wokenup = 0;
247 static int g_journal_sync_requested = 0;
248 
249 #ifdef GJ_MEMDEBUG
250 struct meminfo {
251 	size_t		mi_size;
252 	struct stack	mi_stack;
253 };
254 #endif
255 
256 /*
257  * We use our own malloc/realloc/free functions, so we can collect statistics
258  * and force a journal switch when we're running out of cache.
259  */
260 static void *
261 gj_malloc(size_t size, int flags)
262 {
263 	void *p;
264 #ifdef GJ_MEMDEBUG
265 	struct meminfo *mi;
266 #endif
267 
268 	mtx_lock(&g_journal_cache_mtx);
269 	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
270 	    g_journal_cache_used + size > g_journal_cache_low) {
271 		GJ_DEBUG(1, "No cache, waking up the switcher.");
272 		g_journal_switcher_wokenup = 1;
273 		wakeup(&g_journal_switcher_state);
274 	}
275 	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
276 	    g_journal_cache_used + size > g_journal_cache_limit) {
277 		mtx_unlock(&g_journal_cache_mtx);
278 		g_journal_cache_alloc_failures++;
279 		return (NULL);
280 	}
281 	g_journal_cache_used += size;
282 	mtx_unlock(&g_journal_cache_mtx);
283 	flags &= ~M_NOWAIT;
284 #ifndef GJ_MEMDEBUG
285 	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
286 #else
287 	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
288 	p = (u_char *)mi + sizeof(*mi);
289 	mi->mi_size = size;
290 	stack_save(&mi->mi_stack);
291 #endif
292 	return (p);
293 }
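
/*
 * Note (annotation): gj_malloc() strips M_NOWAIT before calling malloc(9) and
 * always passes M_WAITOK, so the allocation itself never fails.  M_NOWAIT is
 * only honoured as a cache-limit hint: if granting the request would exceed
 * g_journal_cache_limit, NULL is returned and the failure is counted in
 * kern.geom.journal.cache.alloc_failures.
 */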
294 
295 static void
296 gj_free(void *p, size_t size)
297 {
298 #ifdef GJ_MEMDEBUG
299 	struct meminfo *mi;
300 #endif
301 
302 	KASSERT(p != NULL, ("p=NULL"));
303 	KASSERT(size > 0, ("size=0"));
304 	mtx_lock(&g_journal_cache_mtx);
305 	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
306 	g_journal_cache_used -= size;
307 	mtx_unlock(&g_journal_cache_mtx);
308 #ifdef GJ_MEMDEBUG
309 	mi = p = (void *)((u_char *)p - sizeof(*mi));
310 	if (mi->mi_size != size) {
311 		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
312 		    mi->mi_size);
313 		printf("GJOURNAL: Alloc backtrace:\n");
314 		stack_print(&mi->mi_stack);
315 		printf("GJOURNAL: Free backtrace:\n");
316 		kdb_backtrace();
317 	}
318 #endif
319 	free(p, M_JOURNAL);
320 }
321 
322 static void *
323 gj_realloc(void *p, size_t size, size_t oldsize)
324 {
325 	void *np;
326 
327 #ifndef GJ_MEMDEBUG
328 	mtx_lock(&g_journal_cache_mtx);
329 	g_journal_cache_used -= oldsize;
330 	g_journal_cache_used += size;
331 	mtx_unlock(&g_journal_cache_mtx);
332 	np = realloc(p, size, M_JOURNAL, M_WAITOK);
333 #else
334 	np = gj_malloc(size, M_WAITOK);
335 	bcopy(p, np, MIN(oldsize, size));
336 	gj_free(p, oldsize);
337 #endif
338 	return (np);
339 }
340 
341 static void
342 g_journal_check_overflow(struct g_journal_softc *sc)
343 {
344 	off_t length, used;
345 
346 	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
347 	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
348 	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
349 	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
350 	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
351 		panic("Journal overflow "
352 		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
353 		    (unsigned)sc->sc_id,
354 		    (intmax_t)sc->sc_journal_offset,
355 		    (intmax_t)sc->sc_active.jj_offset,
356 		    (intmax_t)sc->sc_inactive.jj_offset);
357 	}
358 	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
359 		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
360 		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
361 	} else {
362 		length = sc->sc_jend - sc->sc_active.jj_offset;
363 		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
364 		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
365 			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
366 		else {
367 			used = sc->sc_jend - sc->sc_active.jj_offset;
368 			used += sc->sc_journal_offset - sc->sc_jstart;
369 		}
370 	}
371 	/* Already woken up? */
372 	if (g_journal_switcher_wokenup)
373 		return;
374 	/*
375 	 * If the active journal takes more than g_journal_force_switch percent
376 	 * of the free journal space, we force a journal switch.
377 	 */
378 	KASSERT(length > 0,
379 	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
380 	    (intmax_t)length, (intmax_t)used,
381 	    (intmax_t)sc->sc_active.jj_offset,
382 	    (intmax_t)sc->sc_inactive.jj_offset,
383 	    (intmax_t)sc->sc_journal_offset));
384 	if ((used * 100) / length > g_journal_force_switch) {
385 		g_journal_stats_journal_full++;
386 		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
387 		    sc->sc_name, (used * 100) / length);
388 		mtx_lock(&g_journal_cache_mtx);
389 		g_journal_switcher_wokenup = 1;
390 		wakeup(&g_journal_switcher_state);
391 		mtx_unlock(&g_journal_cache_mtx);
392 	}
393 }
394 
395 static void
396 g_journal_orphan(struct g_consumer *cp)
397 {
398 	struct g_journal_softc *sc;
399 	char name[256];
400 	int error;
401 
402 	g_topology_assert();
403 	sc = cp->geom->softc;
404 	strlcpy(name, cp->provider->name, sizeof(name));
405 	GJ_DEBUG(0, "Lost provider %s.", name);
406 	if (sc == NULL)
407 		return;
408 	error = g_journal_destroy(sc);
409 	if (error == 0)
410 		GJ_DEBUG(0, "Journal %s destroyed.", name);
411 	else {
412 		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
413 		    "Destroy it manually after last close.", sc->sc_name,
414 		    error);
415 	}
416 }
417 
418 static int
419 g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
420 {
421 	struct g_journal_softc *sc;
422 	int dcw;
423 
424 	g_topology_assert();
425 	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
426 	    acr, acw, ace);
427 
428 	dcw = pp->acw + acw;
429 
430 	sc = pp->geom->softc;
431 	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
432 		if (acr <= 0 && acw <= 0 && ace <= 0)
433 			return (0);
434 		else
435 			return (ENXIO);
436 	}
437 	if (pp->acw == 0 && dcw > 0) {
438 		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
439 		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
440 		g_topology_unlock();
441 		g_journal_metadata_update(sc);
442 		g_topology_lock();
443 	} /* else if (pp->acw != 0 && dcw == 0 && JEMPTY(sc)) {
444 		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
445 		sc->sc_flags |= GJF_DEVICE_CLEAN;
446 		g_topology_unlock();
447 		g_journal_metadata_update(sc);
448 		g_topology_lock();
449 	} */
450 	return (0);
451 }
452 
453 static void
454 g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
455 {
456 
457 	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
458 	data += sizeof(GJ_HEADER_MAGIC);
459 	le32enc(data, hdr->jh_journal_id);
460 	data += 4;
461 	le32enc(data, hdr->jh_journal_next_id);
462 }
463 
464 static int
465 g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
466 {
467 
468 	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
469 	data += sizeof(hdr->jh_magic);
470 	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
471 		return (EINVAL);
472 	hdr->jh_journal_id = le32dec(data);
473 	data += 4;
474 	hdr->jh_journal_next_id = le32dec(data);
475 	return (0);
476 }
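
/*
 * On-disk journal header layout (annotation), as implied by the routines
 * above: the GJ_HEADER_MAGIC string followed by two little-endian 32-bit
 * fields, jh_journal_id and jh_journal_next_id.  g_journal_write_header()
 * below writes it padded out to a full provider sector.
 */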
477 
478 static void
479 g_journal_flush_cache(struct g_journal_softc *sc)
480 {
481 	struct bintime bt;
482 	int error;
483 
484 	if (sc->sc_bio_flush == 0)
485 		return;
486 	GJ_TIMER_START(1, &bt);
487 	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
488 		error = g_io_flush(sc->sc_jconsumer);
489 		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
490 		    sc->sc_jconsumer->provider->name, error);
491 	}
492 	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
493 		/*
494 		 * TODO: This could be called in parallel with the
495 		 *       previous call.
496 		 */
497 		error = g_io_flush(sc->sc_dconsumer);
498 		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
499 		    sc->sc_dconsumer->provider->name, error);
500 	}
501 	GJ_TIMER_STOP(1, &bt, "Cache flush time");
502 }
503 
504 static int
505 g_journal_write_header(struct g_journal_softc *sc)
506 {
507 	struct g_journal_header hdr;
508 	struct g_consumer *cp;
509 	u_char *buf;
510 	int error;
511 
512 	cp = sc->sc_jconsumer;
513 	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
514 
515 	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
516 	hdr.jh_journal_id = sc->sc_journal_id;
517 	hdr.jh_journal_next_id = sc->sc_journal_next_id;
518 	g_journal_header_encode(&hdr, buf);
519 	error = g_write_data(cp, sc->sc_journal_offset, buf,
520 	    cp->provider->sectorsize);
521 	/* if (error == 0) */
522 	sc->sc_journal_offset += cp->provider->sectorsize;
523 
524 	gj_free(buf, cp->provider->sectorsize);
525 	return (error);
526 }
527 
528 /*
529  * Every journal record has a header and data following it.
530  * The functions below are used to encode the header to little endian before
531  * storing it and to decode it back to system endianness after reading.
532  */
533 static void
534 g_journal_record_header_encode(struct g_journal_record_header *hdr,
535     u_char *data)
536 {
537 	struct g_journal_entry *ent;
538 	u_int i;
539 
540 	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
541 	data += sizeof(GJ_RECORD_HEADER_MAGIC);
542 	le32enc(data, hdr->jrh_journal_id);
543 	data += 8;
544 	le16enc(data, hdr->jrh_nentries);
545 	data += 2;
546 	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
547 	data += 8;
548 	for (i = 0; i < hdr->jrh_nentries; i++) {
549 		ent = &hdr->jrh_entries[i];
550 		le64enc(data, ent->je_joffset);
551 		data += 8;
552 		le64enc(data, ent->je_offset);
553 		data += 8;
554 		le64enc(data, ent->je_length);
555 		data += 8;
556 	}
557 }
558 
559 static int
560 g_journal_record_header_decode(const u_char *data,
561     struct g_journal_record_header *hdr)
562 {
563 	struct g_journal_entry *ent;
564 	u_int i;
565 
566 	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
567 	data += sizeof(hdr->jrh_magic);
568 	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
569 		return (EINVAL);
570 	hdr->jrh_journal_id = le32dec(data);
571 	data += 8;
572 	hdr->jrh_nentries = le16dec(data);
573 	data += 2;
574 	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
575 		return (EINVAL);
576 	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
577 	data += 8;
578 	for (i = 0; i < hdr->jrh_nentries; i++) {
579 		ent = &hdr->jrh_entries[i];
580 		ent->je_joffset = le64dec(data);
581 		data += 8;
582 		ent->je_offset = le64dec(data);
583 		data += 8;
584 		ent->je_length = le64dec(data);
585 		data += 8;
586 	}
587 	return (0);
588 }
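
/*
 * On-disk record header layout (annotation), as implied by the pointer
 * arithmetic above: the GJ_RECORD_HEADER_MAGIC string, an 8-byte slot holding
 * the little-endian 32-bit journal id, a 16-bit entry count, an 8-byte
 * checksum field, and then jrh_nentries entries of three little-endian 64-bit
 * values each (je_joffset, je_offset, je_length).
 */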
589 
590 /*
591  * Function reads metadata from a provider (via the given consumer), decodes
592  * it to system endianness and verifies its correctness.
593  */
594 static int
595 g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
596 {
597 	struct g_provider *pp;
598 	u_char *buf;
599 	int error;
600 
601 	g_topology_assert();
602 
603 	error = g_access(cp, 1, 0, 0);
604 	if (error != 0)
605 		return (error);
606 	pp = cp->provider;
607 	g_topology_unlock();
608 	/* Metadata is stored in last sector. */
609 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
610 	    &error);
611 	g_topology_lock();
612 	g_access(cp, -1, 0, 0);
613 	if (buf == NULL) {
614 		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
615 		    cp->provider->name, error);
616 		return (error);
617 	}
618 
619 	/* Decode metadata. */
620 	error = journal_metadata_decode(buf, md);
621 	g_free(buf);
622 	/* Is this a gjournal provider at all? */
623 	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
624 		return (EINVAL);
625 	/*
626 	 * Are we able to handle this version of metadata?
627 	 * We only maintain backward compatibility.
628 	 */
629 	if (md->md_version > G_JOURNAL_VERSION) {
630 		GJ_DEBUG(0,
631 		    "Kernel module is too old to handle metadata from %s.",
632 		    cp->provider->name);
633 		return (EINVAL);
634 	}
635 	/* Is checksum correct? */
636 	if (error != 0) {
637 		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
638 		    cp->provider->name);
639 		return (error);
640 	}
641 	return (0);
642 }
643 
644 /*
645  * The two functions below are responsible for updating metadata.
646  * Only the metadata on the data provider is updated (we need to update
647  * the information about the active journal in there).
648  */
649 static void
650 g_journal_metadata_done(struct bio *bp)
651 {
652 
653 	/*
654 	 * There is not much we can do on error except report it.
655 	 */
656 	if (bp->bio_error != 0) {
657 		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
658 		    bp->bio_error);
659 	} else {
660 		GJ_LOGREQ(2, bp, "Metadata updated.");
661 	}
662 	gj_free(bp->bio_data, bp->bio_length);
663 	g_destroy_bio(bp);
664 }
665 
666 static void
667 g_journal_metadata_update(struct g_journal_softc *sc)
668 {
669 	struct g_journal_metadata md;
670 	struct g_consumer *cp;
671 	struct bio *bp;
672 	u_char *sector;
673 
674 	cp = sc->sc_dconsumer;
675 	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
676 	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
677 	md.md_version = G_JOURNAL_VERSION;
678 	md.md_id = sc->sc_id;
679 	md.md_type = sc->sc_orig_type;
680 	md.md_jstart = sc->sc_jstart;
681 	md.md_jend = sc->sc_jend;
682 	md.md_joffset = sc->sc_inactive.jj_offset;
683 	md.md_jid = sc->sc_journal_previous_id;
684 	md.md_flags = 0;
685 	if (sc->sc_flags & GJF_DEVICE_CLEAN)
686 		md.md_flags |= GJ_FLAG_CLEAN;
687 
688 	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
689 		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
690 	else
691 		bzero(md.md_provider, sizeof(md.md_provider));
692 	md.md_provsize = cp->provider->mediasize;
693 	journal_metadata_encode(&md, sector);
694 
695 	/*
696 	 * Flush the cache, so we know all data is on disk.
697 	 * We write information like "journal is consistent" here, so we need
698 	 * to be sure it is. Without BIO_FLUSH here, we can end up in a situation
699 	 * where the metadata is stored on disk, but not all the data.
700 	 */
701 	g_journal_flush_cache(sc);
702 
703 	bp = g_alloc_bio();
704 	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
705 	bp->bio_length = cp->provider->sectorsize;
706 	bp->bio_data = sector;
707 	bp->bio_cmd = BIO_WRITE;
708 	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
709 		bp->bio_done = g_journal_metadata_done;
710 		g_io_request(bp, cp);
711 	} else {
712 		bp->bio_done = NULL;
713 		g_io_request(bp, cp);
714 		biowait(bp, "gjmdu");
715 		g_journal_metadata_done(bp);
716 	}
717 
718 	/*
719 	 * Be sure metadata reached the disk.
720 	 */
721 	g_journal_flush_cache(sc);
722 }
723 
724 /*
725  * This is where the I/O request comes from the GEOM.
726  */
727 static void
728 g_journal_start(struct bio *bp)
729 {
730 	struct g_journal_softc *sc;
731 
732 	sc = bp->bio_to->geom->softc;
733 	GJ_LOGREQ(3, bp, "Request received.");
734 
735 	switch (bp->bio_cmd) {
736 	case BIO_READ:
737 	case BIO_WRITE:
738 		mtx_lock(&sc->sc_mtx);
739 		bioq_insert_tail(&sc->sc_regular_queue, bp);
740 		wakeup(sc);
741 		mtx_unlock(&sc->sc_mtx);
742 		return;
743 	case BIO_GETATTR:
744 		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
745 			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
746 			bp->bio_completed = strlen(bp->bio_to->name) + 1;
747 			g_io_deliver(bp, 0);
748 			return;
749 		}
750 		/* FALLTHROUGH */
751 	case BIO_SPEEDUP:
752 	case BIO_DELETE:
753 	default:
754 		g_io_deliver(bp, EOPNOTSUPP);
755 		return;
756 	}
757 }
758 
759 static void
760 g_journal_std_done(struct bio *bp)
761 {
762 	struct g_journal_softc *sc;
763 
764 	sc = bp->bio_from->geom->softc;
765 	mtx_lock(&sc->sc_mtx);
766 	bioq_insert_tail(&sc->sc_back_queue, bp);
767 	wakeup(sc);
768 	mtx_unlock(&sc->sc_mtx);
769 }
770 
771 static struct bio *
772 g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
773     int flags)
774 {
775 	struct bio *bp;
776 
777 	bp = g_alloc_bio();
778 	bp->bio_offset = start;
779 	bp->bio_joffset = joffset;
780 	bp->bio_length = end - start;
781 	bp->bio_cmd = BIO_WRITE;
782 	bp->bio_done = g_journal_std_done;
783 	if (data == NULL)
784 		bp->bio_data = NULL;
785 	else {
786 		bp->bio_data = gj_malloc(bp->bio_length, flags);
787 		if (bp->bio_data != NULL)
788 			bcopy(data, bp->bio_data, bp->bio_length);
789 	}
790 	return (bp);
791 }
792 
793 #define	g_journal_insert_bio(head, bp, flags)				\
794 	g_journal_insert((head), (bp)->bio_offset,			\
795 		(bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
796 		(bp)->bio_data, flags)
797 /*
798  * The function below does a lot more than just insert a bio into the queue.
799  * It keeps the queue sorted by offset and ensures that there is no duplicated
800  * data (it combines bios where ranges overlap).
801  *
802  * The function returns the number of bios inserted (as a bio can be split).
803  */
804 static int
805 g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
806     u_char *data, int flags)
807 {
808 	struct bio *nbp, *cbp, *pbp;
809 	off_t cstart, cend;
810 	u_char *tmpdata;
811 	int n;
812 
813 	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
814 	    joffset);
815 	n = 0;
816 	pbp = NULL;
817 	GJQ_FOREACH(*head, cbp) {
818 		cstart = cbp->bio_offset;
819 		cend = cbp->bio_offset + cbp->bio_length;
820 
821 		if (nstart >= cend) {
822 			/*
823 			 *  +-------------+
824 			 *  |             |
825 			 *  |   current   |  +-------------+
826 			 *  |     bio     |  |             |
827 			 *  |             |  |     new     |
828 			 *  +-------------+  |     bio     |
829 			 *                   |             |
830 			 *                   +-------------+
831 			 */
832 			GJ_DEBUG(3, "INSERT(%p): 1", *head);
833 		} else if (nend <= cstart) {
834 			/*
835 			 *                   +-------------+
836 			 *                   |             |
837 			 *  +-------------+  |   current   |
838 			 *  |             |  |     bio     |
839 			 *  |     new     |  |             |
840 			 *  |     bio     |  +-------------+
841 			 *  |             |
842 			 *  +-------------+
843 			 */
844 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
845 			    flags);
846 			if (pbp == NULL)
847 				*head = nbp;
848 			else
849 				pbp->bio_next = nbp;
850 			nbp->bio_next = cbp;
851 			n++;
852 			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
853 			    pbp);
854 			goto end;
855 		} else if (nstart <= cstart && nend >= cend) {
856 			/*
857 			 *      +-------------+      +-------------+
858 			 *      | current bio |      | current bio |
859 			 *  +---+-------------+---+  +-------------+---+
860 			 *  |   |             |   |  |             |   |
861 			 *  |   |             |   |  |             |   |
862 			 *  |   +-------------+   |  +-------------+   |
863 			 *  |       new bio       |  |     new bio     |
864 			 *  +---------------------+  +-----------------+
865 			 *
866 			 *      +-------------+  +-------------+
867 			 *      | current bio |  | current bio |
868 			 *  +---+-------------+  +-------------+
869 			 *  |   |             |  |             |
870 			 *  |   |             |  |             |
871 			 *  |   +-------------+  +-------------+
872 			 *  |     new bio     |  |   new bio   |
873 			 *  +-----------------+  +-------------+
874 			 */
875 			g_journal_stats_bytes_skipped += cbp->bio_length;
876 			cbp->bio_offset = nstart;
877 			cbp->bio_joffset = joffset;
878 			cbp->bio_length = cend - nstart;
879 			if (cbp->bio_data != NULL) {
880 				gj_free(cbp->bio_data, cend - cstart);
881 				cbp->bio_data = NULL;
882 			}
883 			if (data != NULL) {
884 				cbp->bio_data = gj_malloc(cbp->bio_length,
885 				    flags);
886 				if (cbp->bio_data != NULL) {
887 					bcopy(data, cbp->bio_data,
888 					    cbp->bio_length);
889 				}
890 				data += cend - nstart;
891 			}
892 			joffset += cend - nstart;
893 			nstart = cend;
894 			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
895 		} else if (nstart > cstart && nend >= cend) {
896 			/*
897 			 *  +-----------------+  +-------------+
898 			 *  |   current bio   |  | current bio |
899 			 *  |   +-------------+  |   +---------+---+
900 			 *  |   |             |  |   |         |   |
901 			 *  |   |             |  |   |         |   |
902 			 *  +---+-------------+  +---+---------+   |
903 			 *      |   new bio   |      |   new bio   |
904 			 *      +-------------+      +-------------+
905 			 */
906 			g_journal_stats_bytes_skipped += cend - nstart;
907 			nbp = g_journal_new_bio(nstart, cend, joffset, data,
908 			    flags);
909 			nbp->bio_next = cbp->bio_next;
910 			cbp->bio_next = nbp;
911 			cbp->bio_length = nstart - cstart;
912 			if (cbp->bio_data != NULL) {
913 				cbp->bio_data = gj_realloc(cbp->bio_data,
914 				    cbp->bio_length, cend - cstart);
915 			}
916 			if (data != NULL)
917 				data += cend - nstart;
918 			joffset += cend - nstart;
919 			nstart = cend;
920 			n++;
921 			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
922 		} else if (nstart > cstart && nend < cend) {
923 			/*
924 			 *  +---------------------+
925 			 *  |     current bio     |
926 			 *  |   +-------------+   |
927 			 *  |   |             |   |
928 			 *  |   |             |   |
929 			 *  +---+-------------+---+
930 			 *      |   new bio   |
931 			 *      +-------------+
932 			 */
933 			g_journal_stats_bytes_skipped += nend - nstart;
934 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
935 			    flags);
936 			nbp->bio_next = cbp->bio_next;
937 			cbp->bio_next = nbp;
938 			if (cbp->bio_data == NULL)
939 				tmpdata = NULL;
940 			else
941 				tmpdata = cbp->bio_data + nend - cstart;
942 			nbp = g_journal_new_bio(nend, cend,
943 			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
944 			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
945 			((struct bio *)cbp->bio_next)->bio_next = nbp;
946 			cbp->bio_length = nstart - cstart;
947 			if (cbp->bio_data != NULL) {
948 				cbp->bio_data = gj_realloc(cbp->bio_data,
949 				    cbp->bio_length, cend - cstart);
950 			}
951 			n += 2;
952 			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
953 			goto end;
954 		} else if (nstart <= cstart && nend < cend) {
955 			/*
956 			 *  +-----------------+      +-------------+
957 			 *  |   current bio   |      | current bio |
958 			 *  +-------------+   |  +---+---------+   |
959 			 *  |             |   |  |   |         |   |
960 			 *  |             |   |  |   |         |   |
961 			 *  +-------------+---+  |   +---------+---+
962 			 *  |   new bio   |      |   new bio   |
963 			 *  +-------------+      +-------------+
964 			 */
965 			g_journal_stats_bytes_skipped += nend - nstart;
966 			nbp = g_journal_new_bio(nstart, nend, joffset, data,
967 			    flags);
968 			if (pbp == NULL)
969 				*head = nbp;
970 			else
971 				pbp->bio_next = nbp;
972 			nbp->bio_next = cbp;
973 			cbp->bio_offset = nend;
974 			cbp->bio_length = cend - nend;
975 			cbp->bio_joffset += nend - cstart;
976 			tmpdata = cbp->bio_data;
977 			if (tmpdata != NULL) {
978 				cbp->bio_data = gj_malloc(cbp->bio_length,
979 				    flags);
980 				if (cbp->bio_data != NULL) {
981 					bcopy(tmpdata + nend - cstart,
982 					    cbp->bio_data, cbp->bio_length);
983 				}
984 				gj_free(tmpdata, cend - cstart);
985 			}
986 			n++;
987 			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
988 			goto end;
989 		}
990 		if (nstart == nend)
991 			goto end;
992 		pbp = cbp;
993 	}
994 	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
995 	if (pbp == NULL)
996 		*head = nbp;
997 	else
998 		pbp->bio_next = nbp;
999 	nbp->bio_next = NULL;
1000 	n++;
1001 	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
1002 end:
1003 	if (g_journal_debug >= 3) {
1004 		GJQ_FOREACH(*head, cbp) {
1005 			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
1006 			    (intmax_t)cbp->bio_offset,
1007 			    (intmax_t)cbp->bio_length,
1008 			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
1009 		}
1010 		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
1011 	}
1012 	return (n);
1013 }
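
/*
 * Worked example (annotation): suppose the queue holds a single bio covering
 * [0, 8192) with data, and g_journal_insert() is called for [4096, 12288).
 * The existing bio is trimmed to [0, 4096) (old data kept), a new bio with
 * the new data is inserted for [4096, 8192), and after the loop a final bio
 * is appended for [8192, 12288); the function returns 2.  A later
 * g_journal_optimize() pass may merge the last two back into one bio.
 */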
1014 
1015 /*
1016  * The function combines neighbouring bios, trying to squeeze as much data as
1017  * possible into one bio.
1018  *
1019  * The function returns the number of bios combined (as a negative value).
1020  */
1021 static int
1022 g_journal_optimize(struct bio *head)
1023 {
1024 	struct bio *cbp, *pbp;
1025 	int n;
1026 
1027 	n = 0;
1028 	pbp = NULL;
1029 	GJQ_FOREACH(head, cbp) {
1030 		/* Skip bios which have to be read first. */
1031 		if (cbp->bio_data == NULL) {
1032 			pbp = NULL;
1033 			continue;
1034 		}
1035 		/* There is no previous bio yet. */
1036 		if (pbp == NULL) {
1037 			pbp = cbp;
1038 			continue;
1039 		}
1040 		/* Is this a neighbour bio? */
1041 		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
1042 			/* Be sure that bios queue is sorted. */
1043 			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
1044 			    ("poffset=%jd plength=%jd coffset=%jd",
1045 			    (intmax_t)pbp->bio_offset,
1046 			    (intmax_t)pbp->bio_length,
1047 			    (intmax_t)cbp->bio_offset));
1048 			pbp = cbp;
1049 			continue;
1050 		}
1051 		/* Be sure we don't end up with a bio that is too big. */
1052 		if (pbp->bio_length + cbp->bio_length > maxphys) {
1053 			pbp = cbp;
1054 			continue;
1055 		}
1056 		/* Ok, we can join bios. */
1057 		GJ_LOGREQ(4, pbp, "Join: ");
1058 		GJ_LOGREQ(4, cbp, "and: ");
1059 		pbp->bio_data = gj_realloc(pbp->bio_data,
1060 		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
1061 		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
1062 		    cbp->bio_length);
1063 		gj_free(cbp->bio_data, cbp->bio_length);
1064 		pbp->bio_length += cbp->bio_length;
1065 		pbp->bio_next = cbp->bio_next;
1066 		g_destroy_bio(cbp);
1067 		cbp = pbp;
1068 		g_journal_stats_combined_ios++;
1069 		n--;
1070 		GJ_LOGREQ(4, pbp, "Got: ");
1071 	}
1072 	return (n);
1073 }
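
/*
 * Example (annotation): two adjacent bios [0, 4096) and [4096, 8192), both
 * with data, are merged into a single [0, 8192) bio and -1 is returned.
 * Bios whose data has been dropped (bio_data == NULL) are never merged, and
 * a merge is skipped when the combined length would exceed maxphys.
 */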
1074 
1075 /*
1076  * TODO: Update comment.
1077  * These are the functions responsible for copying one portion of data from
1078  * the journal to the destination provider.
1079  * The order goes like this:
1080  * 1. Read the header, which contains information about the data blocks
1081  *    following it.
1082  * 2. Read the data blocks from the journal.
1083  * 3. Write the data blocks on the data provider.
1084  *
1085  * g_journal_copy_start()
1086  * g_journal_copy_done() - got finished write request, logs potential errors.
1087  */
1088 
1089 /*
1090  * When there is no data in cache, this function is used to read it.
1091  */
1092 static void
1093 g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
1094 {
1095 	struct bio *cbp;
1096 
1097 	/*
1098 	 * We were short on memory, so the data was freed.
1099 	 * In that case we need to read it back from the journal.
1100 	 */
1101 	cbp = g_alloc_bio();
1102 	cbp->bio_cflags = bp->bio_cflags;
1103 	cbp->bio_parent = bp;
1104 	cbp->bio_offset = bp->bio_joffset;
1105 	cbp->bio_length = bp->bio_length;
1106 	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
1107 	cbp->bio_cmd = BIO_READ;
1108 	cbp->bio_done = g_journal_std_done;
1109 	GJ_LOGREQ(4, cbp, "READ FIRST");
1110 	g_io_request(cbp, sc->sc_jconsumer);
1111 	g_journal_cache_misses++;
1112 }
1113 
1114 static void
1115 g_journal_copy_send(struct g_journal_softc *sc)
1116 {
1117 	struct bio *bioq, *bp, *lbp;
1118 
1119 	bioq = lbp = NULL;
1120 	mtx_lock(&sc->sc_mtx);
1121 	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
1122 		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
1123 		if (bp == NULL)
1124 			break;
1125 		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
1126 		sc->sc_copy_in_progress++;
1127 		GJQ_INSERT_AFTER(bioq, bp, lbp);
1128 		lbp = bp;
1129 	}
1130 	mtx_unlock(&sc->sc_mtx);
1131 	if (g_journal_do_optimize)
1132 		sc->sc_copy_in_progress += g_journal_optimize(bioq);
1133 	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1134 		GJQ_REMOVE(bioq, bp);
1135 		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
1136 		bp->bio_cflags = GJ_BIO_COPY;
1137 		if (bp->bio_data == NULL)
1138 			g_journal_read_first(sc, bp);
1139 		else {
1140 			bp->bio_joffset = 0;
1141 			GJ_LOGREQ(4, bp, "SEND");
1142 			g_io_request(bp, sc->sc_dconsumer);
1143 		}
1144 	}
1145 }
1146 
1147 static void
1148 g_journal_copy_start(struct g_journal_softc *sc)
1149 {
1150 
1151 	/*
1152 	 * Remember in metadata that we're starting to copy journaled data
1153 	 * to the data provider.
1154 	 * In case of power failure, we will copy this data once again on boot.
1155 	 */
1156 	if (!sc->sc_journal_copying) {
1157 		sc->sc_journal_copying = 1;
1158 		GJ_DEBUG(1, "Starting copy of journal.");
1159 		g_journal_metadata_update(sc);
1160 	}
1161 	g_journal_copy_send(sc);
1162 }
1163 
1164 /*
1165  * Data block has been read from the journal provider.
1166  */
1167 static int
1168 g_journal_copy_read_done(struct bio *bp)
1169 {
1170 	struct g_journal_softc *sc;
1171 	struct g_consumer *cp;
1172 	struct bio *pbp;
1173 
1174 	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1175 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1176 
1177 	sc = bp->bio_from->geom->softc;
1178 	pbp = bp->bio_parent;
1179 
1180 	if (bp->bio_error != 0) {
1181 		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1182 		    bp->bio_to->name, bp->bio_error);
1183 		/*
1184 		 * We will not be able to deliver the WRITE request either.
1185 		 */
1186 		gj_free(bp->bio_data, bp->bio_length);
1187 		g_destroy_bio(pbp);
1188 		g_destroy_bio(bp);
1189 		sc->sc_copy_in_progress--;
1190 		return (1);
1191 	}
1192 	pbp->bio_data = bp->bio_data;
1193 	cp = sc->sc_dconsumer;
1194 	g_io_request(pbp, cp);
1195 	GJ_LOGREQ(4, bp, "READ DONE");
1196 	g_destroy_bio(bp);
1197 	return (0);
1198 }
1199 
1200 /*
1201  * Data block has been written to the data provider.
1202  */
1203 static void
1204 g_journal_copy_write_done(struct bio *bp)
1205 {
1206 	struct g_journal_softc *sc;
1207 
1208 	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
1209 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));
1210 
1211 	sc = bp->bio_from->geom->softc;
1212 	sc->sc_copy_in_progress--;
1213 
1214 	if (bp->bio_error != 0) {
1215 		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
1216 		    bp->bio_error);
1217 	}
1218 	GJQ_REMOVE(sc->sc_copy_queue, bp);
1219 	gj_free(bp->bio_data, bp->bio_length);
1220 	GJ_LOGREQ(4, bp, "DONE");
1221 	g_destroy_bio(bp);
1222 
1223 	if (sc->sc_copy_in_progress == 0) {
1224 		/*
1225 		 * This was the last write request for this journal.
1226 		 */
1227 		GJ_DEBUG(1, "Data has been copied.");
1228 		sc->sc_journal_copying = 0;
1229 	}
1230 }
1231 
1232 static void g_journal_flush_done(struct bio *bp);
1233 
1234 /*
1235  * Flush one record onto active journal provider.
1236  */
1237 static void
1238 g_journal_flush(struct g_journal_softc *sc)
1239 {
1240 	struct g_journal_record_header hdr;
1241 	struct g_journal_entry *ent;
1242 	struct g_provider *pp;
1243 	struct bio **bioq;
1244 	struct bio *bp, *fbp, *pbp;
1245 	off_t joffset;
1246 	u_char *data, hash[16];
1247 	MD5_CTX ctx;
1248 	u_int i;
1249 
1250 	if (sc->sc_current_count == 0)
1251 		return;
1252 
1253 	pp = sc->sc_jprovider;
1254 	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1255 	joffset = sc->sc_journal_offset;
1256 
1257 	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
1258 	    sc->sc_current_count, pp->name, (intmax_t)joffset);
1259 
1260 	/*
1261 	 * Store 'journal id', so we know to which journal this record belongs.
1262 	 */
1263 	hdr.jrh_journal_id = sc->sc_journal_id;
1264 	/* Could be less than g_journal_record_entries if called due to a timeout. */
1265 	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
1266 	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));
1267 
1268 	bioq = &sc->sc_active.jj_queue;
1269 	GJQ_LAST(sc->sc_flush_queue, pbp);
1270 
1271 	fbp = g_alloc_bio();
1272 	fbp->bio_parent = NULL;
1273 	fbp->bio_cflags = GJ_BIO_JOURNAL;
1274 	fbp->bio_offset = -1;
1275 	fbp->bio_joffset = joffset;
1276 	fbp->bio_length = pp->sectorsize;
1277 	fbp->bio_cmd = BIO_WRITE;
1278 	fbp->bio_done = g_journal_std_done;
1279 	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
1280 	pbp = fbp;
1281 	fbp->bio_to = pp;
1282 	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
1283 	joffset += pp->sectorsize;
1284 	sc->sc_flush_count++;
1285 	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1286 		MD5Init(&ctx);
1287 
1288 	for (i = 0; i < hdr.jrh_nentries; i++) {
1289 		bp = sc->sc_current_queue;
1290 		KASSERT(bp != NULL, ("NULL bp"));
1291 		bp->bio_to = pp;
1292 		GJ_LOGREQ(4, bp, "FLUSHED");
1293 		sc->sc_current_queue = bp->bio_next;
1294 		bp->bio_next = NULL;
1295 		sc->sc_current_count--;
1296 
1297 		/* Add to the header. */
1298 		ent = &hdr.jrh_entries[i];
1299 		ent->je_offset = bp->bio_offset;
1300 		ent->je_joffset = joffset;
1301 		ent->je_length = bp->bio_length;
1302 
1303 		data = bp->bio_data;
1304 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1305 			MD5Update(&ctx, data, ent->je_length);
1306 		g_reset_bio(bp);
1307 		bp->bio_cflags = GJ_BIO_JOURNAL;
1308 		bp->bio_offset = ent->je_offset;
1309 		bp->bio_joffset = ent->je_joffset;
1310 		bp->bio_length = ent->je_length;
1311 		bp->bio_data = data;
1312 		bp->bio_cmd = BIO_WRITE;
1313 		bp->bio_done = g_journal_std_done;
1314 		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
1315 		pbp = bp;
1316 		bp->bio_to = pp;
1317 		GJ_LOGREQ(4, bp, "FLUSH_OUT");
1318 		joffset += bp->bio_length;
1319 		sc->sc_flush_count++;
1320 
1321 		/*
1322 		 * Add the request to the active journal queue (sc_active.jj_queue).
1323 		 * This is our cache. After a journal switch we don't have to
1324 		 * read the data from the inactive journal, because we keep
1325 		 * it in memory.
1326 		 */
1327 		g_journal_insert(bioq, ent->je_offset,
1328 		    ent->je_offset + ent->je_length, ent->je_joffset, data,
1329 		    M_NOWAIT);
1330 	}
1331 
1332 	/*
1333 	 * After all requests, store valid header.
1334 	 */
1335 	data = gj_malloc(pp->sectorsize, M_WAITOK);
1336 	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1337 		MD5Final(hash, &ctx);
1338 		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
1339 	}
1340 	g_journal_record_header_encode(&hdr, data);
1341 	fbp->bio_data = data;
1342 
1343 	sc->sc_journal_offset = joffset;
1344 
1345 	g_journal_check_overflow(sc);
1346 }
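
/*
 * Layout note (annotation): g_journal_flush() queues the record header bio
 * (fbp) first, followed by one bio per entry, so on the journal provider a
 * record is a single header sector followed by its data blocks.  The header
 * data itself is only encoded and attached at the end, once all entries (and
 * the optional MD5 checksum) are known.
 */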
1347 
1348 /*
1349  * Flush request finished.
1350  */
1351 static void
1352 g_journal_flush_done(struct bio *bp)
1353 {
1354 	struct g_journal_softc *sc;
1355 	struct g_consumer *cp;
1356 
1357 	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
1358 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));
1359 
1360 	cp = bp->bio_from;
1361 	sc = cp->geom->softc;
1362 	sc->sc_flush_in_progress--;
1363 
1364 	if (bp->bio_error != 0) {
1365 		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
1366 		    bp->bio_error);
1367 	}
1368 	gj_free(bp->bio_data, bp->bio_length);
1369 	GJ_LOGREQ(4, bp, "DONE");
1370 	g_destroy_bio(bp);
1371 }
1372 
1373 static void g_journal_release_delayed(struct g_journal_softc *sc);
1374 
1375 static void
1376 g_journal_flush_send(struct g_journal_softc *sc)
1377 {
1378 	struct g_consumer *cp;
1379 	struct bio *bioq, *bp, *lbp;
1380 
1381 	cp = sc->sc_jconsumer;
1382 	bioq = lbp = NULL;
1383 	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
1384 		/* Send one flush request to the active journal. */
1385 		bp = GJQ_FIRST(sc->sc_flush_queue);
1386 		if (bp != NULL) {
1387 			GJQ_REMOVE(sc->sc_flush_queue, bp);
1388 			sc->sc_flush_count--;
1389 			bp->bio_offset = bp->bio_joffset;
1390 			bp->bio_joffset = 0;
1391 			sc->sc_flush_in_progress++;
1392 			GJQ_INSERT_AFTER(bioq, bp, lbp);
1393 			lbp = bp;
1394 		}
1395 		/* Try to release delayed requests. */
1396 		g_journal_release_delayed(sc);
1397 		/* If there are no requests to flush, leave. */
1398 		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
1399 			break;
1400 	}
1401 	if (g_journal_do_optimize)
1402 		sc->sc_flush_in_progress += g_journal_optimize(bioq);
1403 	while ((bp = GJQ_FIRST(bioq)) != NULL) {
1404 		GJQ_REMOVE(bioq, bp);
1405 		GJ_LOGREQ(3, bp, "Flush request send");
1406 		g_io_request(bp, cp);
1407 	}
1408 }
1409 
1410 static void
1411 g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
1412 {
1413 	int n;
1414 
1415 	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
1416 	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
1417 	sc->sc_current_count += n;
1418 	n = g_journal_optimize(sc->sc_current_queue);
1419 	sc->sc_current_count += n;
1420 	/*
1421 	 * For requests which are added to the current queue we deliver the
1422 	 * response immediately.
1423 	 */
1424 	bp->bio_completed = bp->bio_length;
1425 	g_io_deliver(bp, 0);
1426 	if (sc->sc_current_count >= g_journal_record_entries) {
1427 		/*
1428 		 * Let's flush one record onto active journal provider.
1429 		 */
1430 		g_journal_flush(sc);
1431 	}
1432 }
1433 
1434 static void
1435 g_journal_release_delayed(struct g_journal_softc *sc)
1436 {
1437 	struct bio *bp;
1438 
1439 	for (;;) {
1440 		/* The flush queue is full, exit. */
1441 		if (sc->sc_flush_count >= g_journal_accept_immediately)
1442 			return;
1443 		bp = bioq_takefirst(&sc->sc_delayed_queue);
1444 		if (bp == NULL)
1445 			return;
1446 		sc->sc_delayed_count--;
1447 		g_journal_add_current(sc, bp);
1448 	}
1449 }
1450 
1451 /*
1452  * Add an I/O request to the current queue. If we have enough requests for one
1453  * journal record, we flush them onto the active journal provider.
1454  */
1455 static void
1456 g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
1457 {
1458 
1459 	/*
1460 	 * The flush queue is full, we need to delay the request.
1461 	 */
1462 	if (sc->sc_delayed_count > 0 ||
1463 	    sc->sc_flush_count >= g_journal_accept_immediately) {
1464 		GJ_LOGREQ(4, bp, "DELAYED");
1465 		bioq_insert_tail(&sc->sc_delayed_queue, bp);
1466 		sc->sc_delayed_count++;
1467 		return;
1468 	}
1469 
1470 	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
1471 	    ("DELAYED queue not empty."));
1472 	g_journal_add_current(sc, bp);
1473 }
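
/*
 * Flow summary (annotation, based on this excerpt): writes delivered by
 * g_journal_start() land on sc_regular_queue and are later fed to
 * g_journal_add_request() (by the worker thread, which is outside this
 * excerpt).  They accumulate on sc_current_queue until a record's worth is
 * available, g_journal_flush() moves them to sc_flush_queue, and
 * g_journal_flush_send() issues them to the journal provider.  Requests
 * beyond g_journal_accept_immediately wait on sc_delayed_queue.
 */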
1474 
1475 static void g_journal_read_done(struct bio *bp);
1476 
1477 /*
1478  * Try to find requested data in cache.
1479  */
1480 static struct bio *
1481 g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
1482     off_t oend)
1483 {
1484 	off_t cstart, cend;
1485 	struct bio *bp;
1486 
1487 	GJQ_FOREACH(head, bp) {
1488 		if (bp->bio_offset == -1)
1489 			continue;
1490 		cstart = MAX(ostart, bp->bio_offset);
1491 		cend = MIN(oend, bp->bio_offset + bp->bio_length);
1492 		if (cend <= ostart)
1493 			continue;
1494 		else if (cstart >= oend) {
1495 			if (!sorted)
1496 				continue;
1497 			else {
1498 				bp = NULL;
1499 				break;
1500 			}
1501 		}
1502 		if (bp->bio_data == NULL)
1503 			break;
1504 		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
1505 		    bp);
1506 		bcopy(bp->bio_data + cstart - bp->bio_offset,
1507 		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
1508 		pbp->bio_completed += cend - cstart;
1509 		if (pbp->bio_completed == pbp->bio_length) {
1510 			/*
1511 			 * Cool, the whole request was in cache, deliver happy
1512 			 * message.
1513 			 */
1514 			g_io_deliver(pbp, 0);
1515 			return (pbp);
1516 		}
1517 		break;
1518 	}
1519 	return (bp);
1520 }
1521 
1522 /*
1523  * This function is used for collecting data on read.
1524  * The complexity is because parts of the data can be stored in four different
1525  * places:
1526  * - in memory - the data not yet sent to the active journal provider
1527  * - in the active journal
1528  * - in the inactive journal
1529  * - in the data provider
1530  */
1531 static void
1532 g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
1533     off_t oend)
1534 {
1535 	struct bio *bp, *nbp, *head;
1536 	off_t cstart, cend;
1537 	u_int i, sorted = 0;
1538 
1539 	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);
1540 
1541 	cstart = cend = -1;
1542 	bp = NULL;
1543 	head = NULL;
1544 	for (i = 1; i <= 5; i++) {
1545 		switch (i) {
1546 		case 1:	/* Not-yet-sent data. */
1547 			head = sc->sc_current_queue;
1548 			sorted = 1;
1549 			break;
1550 		case 2: /* Skip the flush queue, as its bios are also in the active queue. */
1551 			continue;
1552 		case 3:	/* Active journal. */
1553 			head = sc->sc_active.jj_queue;
1554 			sorted = 1;
1555 			break;
1556 		case 4:	/* Inactive journal. */
1557 			/*
1558 			 * XXX: Here could be a race with g_journal_lowmem().
1559 			 */
1560 			head = sc->sc_inactive.jj_queue;
1561 			sorted = 1;
1562 			break;
1563 		case 5:	/* In-flight to the data provider. */
1564 			head = sc->sc_copy_queue;
1565 			sorted = 0;
1566 			break;
1567 		default:
1568 			panic("gjournal %s: i=%d", __func__, i);
1569 		}
1570 		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
1571 		if (bp == pbp) { /* Got the whole request. */
1572 			GJ_DEBUG(2, "Got the whole request from %u.", i);
1573 			return;
1574 		} else if (bp != NULL) {
1575 			cstart = MAX(ostart, bp->bio_offset);
1576 			cend = MIN(oend, bp->bio_offset + bp->bio_length);
1577 			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
1578 			    i, (intmax_t)cstart, (intmax_t)cend);
1579 			break;
1580 		}
1581 	}
1582 	if (bp != NULL) {
1583 		if (bp->bio_data == NULL) {
1584 			nbp = g_duplicate_bio(pbp);
1585 			nbp->bio_cflags = GJ_BIO_READ;
1586 			nbp->bio_data =
1587 			    pbp->bio_data + cstart - pbp->bio_offset;
1588 			nbp->bio_offset =
1589 			    bp->bio_joffset + cstart - bp->bio_offset;
1590 			nbp->bio_length = cend - cstart;
1591 			nbp->bio_done = g_journal_read_done;
1592 			g_io_request(nbp, sc->sc_jconsumer);
1593 		}
1594 		/*
1595 		 * If we don't have the whole request yet, call g_journal_read()
1596 		 * recursively.
1597 		 */
1598 		if (ostart < cstart)
1599 			g_journal_read(sc, pbp, ostart, cstart);
1600 		if (oend > cend)
1601 			g_journal_read(sc, pbp, cend, oend);
1602 	} else {
1603 		/*
1604 		 * No data in memory, no data in the journal.
1605 		 * It's time to ask the data provider.
1606 		 */
1607 		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
1608 		nbp = g_duplicate_bio(pbp);
1609 		nbp->bio_cflags = GJ_BIO_READ;
1610 		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
1611 		nbp->bio_offset = ostart;
1612 		nbp->bio_length = oend - ostart;
1613 		nbp->bio_done = g_journal_read_done;
1614 		g_io_request(nbp, sc->sc_dconsumer);
1615 		/* We have the whole request, return here. */
1616 		return;
1617 	}
1618 }
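
/*
 * Example (annotation): for a read of [0, 16384) where only [4096, 8192) is
 * found in one of the queues, that part is copied (or fetched from the
 * journal if its cached data was freed) and g_journal_read() recurses for
 * [0, 4096) and [8192, 16384), which may end up as separate BIO_READs to the
 * journal and/or the data provider.
 */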
1619 
1620 /*
1621  * Function responsible for handling finished READ requests.
1622  * Actually, g_std_done() could be used here; the only difference is that we
1623  * log the error.
1624  */
1625 static void
1626 g_journal_read_done(struct bio *bp)
1627 {
1628 	struct bio *pbp;
1629 
1630 	KASSERT(bp->bio_cflags == GJ_BIO_READ,
1631 	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));
1632 
1633 	pbp = bp->bio_parent;
1634 	pbp->bio_inbed++;
1635 	pbp->bio_completed += bp->bio_length;
1636 
1637 	if (bp->bio_error != 0) {
1638 		if (pbp->bio_error == 0)
1639 			pbp->bio_error = bp->bio_error;
1640 		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
1641 		    bp->bio_to->name, bp->bio_error);
1642 	}
1643 	g_destroy_bio(bp);
1644 	if (pbp->bio_children == pbp->bio_inbed &&
1645 	    pbp->bio_completed == pbp->bio_length) {
1646 		/* We're done. */
1647 		g_io_deliver(pbp, 0);
1648 	}
1649 }
1650 
1651 /*
1652  * Deactivate the current journal and activate the next one.
1653  */
1654 static void
1655 g_journal_switch(struct g_journal_softc *sc)
1656 {
1657 	struct g_provider *pp;
1658 
1659 	if (JEMPTY(sc)) {
1660 		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
1661 		pp = LIST_FIRST(&sc->sc_geom->provider);
1662 		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
1663 			sc->sc_flags |= GJF_DEVICE_CLEAN;
1664 			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
1665 			g_journal_metadata_update(sc);
1666 		}
1667 	} else {
1668 		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);
1669 
1670 		pp = sc->sc_jprovider;
1671 
1672 		sc->sc_journal_previous_id = sc->sc_journal_id;
1673 
1674 		sc->sc_journal_id = sc->sc_journal_next_id;
1675 		sc->sc_journal_next_id = arc4random();
1676 
1677 		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
1678 
1679 		g_journal_write_header(sc);
1680 
1681 		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
1682 		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;
1683 
1684 		sc->sc_active.jj_offset =
1685 		    sc->sc_journal_offset - pp->sectorsize;
1686 		sc->sc_active.jj_queue = NULL;
1687 
1688 		/*
1689 		 * Switch is done, start copying data from the (now) inactive
1690 		 * journal to the data provider.
1691 		 */
1692 		g_journal_copy_start(sc);
1693 	}
1694 	mtx_lock(&sc->sc_mtx);
1695 	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
1696 	mtx_unlock(&sc->sc_mtx);
1697 }
1698 
1699 static void
1700 g_journal_initialize(struct g_journal_softc *sc)
1701 {
1702 
1703 	sc->sc_journal_id = arc4random();
1704 	sc->sc_journal_next_id = arc4random();
1705 	sc->sc_journal_previous_id = sc->sc_journal_id;
1706 	sc->sc_journal_offset = sc->sc_jstart;
1707 	sc->sc_inactive.jj_offset = sc->sc_jstart;
1708 	g_journal_write_header(sc);
1709 	sc->sc_active.jj_offset = sc->sc_jstart;
1710 }
1711 
1712 static void
1713 g_journal_mark_as_dirty(struct g_journal_softc *sc)
1714 {
1715 	const struct g_journal_desc *desc;
1716 	int i;
1717 
1718 	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
1719 	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
1720 		desc->jd_dirty(sc->sc_dconsumer);
1721 }
1722 
1723 /*
1724  * Function reads a record header from the given journal.
1725  * It is very similar to g_read_data(9), but it doesn't allocate memory for the
1726  * bio and data on every call.
1727  */
1728 static int
1729 g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
1730     void *data)
1731 {
1732 	int error;
1733 
1734 	g_reset_bio(bp);
1735 	bp->bio_cmd = BIO_READ;
1736 	bp->bio_done = NULL;
1737 	bp->bio_offset = offset;
1738 	bp->bio_length = cp->provider->sectorsize;
1739 	bp->bio_data = data;
1740 	g_io_request(bp, cp);
1741 	error = biowait(bp, "gjs_read");
1742 	return (error);
1743 }
1744 
1745 #if 0
1746 /*
1747  * This function is called when we start the journal device and detect that
1748  * one of the journals was not fully copied.
1749  * The purpose of this function is to read all record headers from the journal
1750  * and place them in the inactive queue, so we can start the journal
1751  * synchronization process and the journal provider itself.
1752  * A design decision was made not to synchronize the whole journal here, as it
1753  * can take too much time. Reading only the headers and delaying the
1754  * synchronization until after the journal provider is started should be the best choice.
1755  */
1756 #endif
1757 
1758 static void
1759 g_journal_sync(struct g_journal_softc *sc)
1760 {
1761 	struct g_journal_record_header rhdr;
1762 	struct g_journal_entry *ent;
1763 	struct g_journal_header jhdr;
1764 	struct g_consumer *cp;
1765 	struct bio *bp, *fbp, *tbp;
1766 	off_t joffset, offset;
1767 	u_char *buf, sum[16];
1768 	uint64_t id;
1769 	MD5_CTX ctx;
1770 	int error, found, i;
1771 
1772 	found = 0;
1773 	fbp = NULL;
1774 	cp = sc->sc_jconsumer;
1775 	bp = g_alloc_bio();
1776 	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
1777 	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
1778 
1779 	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
1780 
1781 	/*
1782 	 * Read and decode first journal header.
1783 	 */
1784 	error = g_journal_sync_read(cp, bp, offset, buf);
1785 	if (error != 0) {
1786 		GJ_DEBUG(0, "Error while reading journal header from %s.",
1787 		    cp->provider->name);
1788 		goto end;
1789 	}
1790 	error = g_journal_header_decode(buf, &jhdr);
1791 	if (error != 0) {
1792 		GJ_DEBUG(0, "Cannot decode journal header from %s.",
1793 		    cp->provider->name);
1794 		goto end;
1795 	}
1796 	id = sc->sc_journal_id;
1797 	if (jhdr.jh_journal_id != sc->sc_journal_id) {
1798 		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
1799 		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
1800 		goto end;
1801 	}
1802 	offset += cp->provider->sectorsize;
1803 	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1804 
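	/*
	 * Scan the journal one header at a time. Each sector read below is
	 * either a record header (its entries are collected) or, if that
	 * fails to decode, possibly the header of the next journal, which
	 * marks the termination we are looking for.
	 */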
1805 	for (;;) {
1806 		/*
1807 		 * If the biggest record won't fit, look for a record header or
1808 		 * journal header from the beginning.
1809 		 */
1810 		GJ_VALIDATE_OFFSET(offset, sc);
1811 		error = g_journal_sync_read(cp, bp, offset, buf);
1812 		if (error != 0) {
1813 			/*
1814 			 * Not good. An error while reading a header
1815 			 * means that we cannot read the next headers and,
1816 			 * in consequence, we cannot find the termination.
1817 			 */
1818 			GJ_DEBUG(0,
1819 			    "Error while reading record header from %s.",
1820 			    cp->provider->name);
1821 			break;
1822 		}
1823 
1824 		error = g_journal_record_header_decode(buf, &rhdr);
1825 		if (error != 0) {
1826 			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
1827 			    (intmax_t)offset, error);
1828 			/*
1829 			 * This is not a record header.
1830 			 * If we are lucky, this is the next journal header.
1831 			 */
1832 			error = g_journal_header_decode(buf, &jhdr);
1833 			if (error != 0) {
1834 				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
1835 				    (intmax_t)offset, error);
1836 				/*
1837 				 * Nope, this is not a journal header, which
1838 				 * basically means that the journal is not
1839 				 * terminated properly.
1840 				 */
1841 				error = ENOENT;
1842 				break;
1843 			}
1844 			/*
1845 			 * Ok. This is the header of _some_ journal. Now we need to
1846 			 * verify whether this is the header of the _next_ journal.
1847 			 */
1848 			if (jhdr.jh_journal_id != id) {
1849 				GJ_DEBUG(1, "Journal ID mismatch at %jd "
1850 				    "(0x%08x != 0x%08x).", (intmax_t)offset,
1851 				    (u_int)jhdr.jh_journal_id, (u_int)id);
1852 				error = ENOENT;
1853 				break;
1854 			}
1855 
1856 			/* Found termination. */
1857 			found++;
1858 			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
1859 			    (intmax_t)offset, (u_int)id);
1860 			sc->sc_active.jj_offset = offset;
1861 			sc->sc_journal_offset =
1862 			    offset + cp->provider->sectorsize;
1863 			sc->sc_journal_id = id;
1864 			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
1865 
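			/*
			 * Move the record entries collected so far onto the
			 * inactive queue, so they can be copied to the data
			 * provider later.
			 */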
1866 			while ((tbp = fbp) != NULL) {
1867 				fbp = tbp->bio_next;
1868 				GJ_LOGREQ(3, tbp, "Adding request.");
1869 				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
1870 				    tbp, M_WAITOK);
1871 			}
1872 
1873 			/* Skip journal's header. */
1874 			offset += cp->provider->sectorsize;
1875 			continue;
1876 		}
1877 
1878 		/* Skip record's header. */
1879 		offset += cp->provider->sectorsize;
1880 
1881 		/*
1882 		 * Add information about every record entry to the inactive
1883 		 * queue.
1884 		 */
1885 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
1886 			MD5Init(&ctx);
1887 		for (i = 0; i < rhdr.jrh_nentries; i++) {
1888 			ent = &rhdr.jrh_entries[i];
1889 			GJ_DEBUG(3, "Insert entry: %jd %jd.",
1890 			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
1891 			g_journal_insert(&fbp, ent->je_offset,
1892 			    ent->je_offset + ent->je_length, ent->je_joffset,
1893 			    NULL, M_WAITOK);
1894 			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1895 				u_char *buf2;
1896 
1897 				/*
1898 				 * TODO: Should use a faster function (like
1899 				 *       g_journal_sync_read()).
1900 				 */
1901 				buf2 = g_read_data(cp, offset, ent->je_length,
1902 				    NULL);
1903 				if (buf2 == NULL)
1904 					GJ_DEBUG(0, "Cannot read data at %jd.",
1905 					    (intmax_t)offset);
1906 				else {
1907 					MD5Update(&ctx, buf2, ent->je_length);
1908 					g_free(buf2);
1909 				}
1910 			}
1911 			/* Skip entry's data. */
1912 			offset += ent->je_length;
1913 		}
1914 		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
1915 			MD5Final(sum, &ctx);
1916 			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
1917 				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
1918 				    (intmax_t)offset);
1919 			}
1920 		}
1921 	}
1922 end:
1923 	gj_free(bp->bio_data, cp->provider->sectorsize);
1924 	g_destroy_bio(bp);
1925 
1926 	/* Remove bios from unterminated journal. */
1927 	while ((tbp = fbp) != NULL) {
1928 		fbp = tbp->bio_next;
1929 		g_destroy_bio(tbp);
1930 	}
1931 
1932 	if (found < 1 && joffset > 0) {
1933 		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
1934 		    sc->sc_name);
1935 		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
1936 			sc->sc_inactive.jj_queue = tbp->bio_next;
1937 			g_destroy_bio(tbp);
1938 		}
1939 		g_journal_initialize(sc);
1940 		g_journal_mark_as_dirty(sc);
1941 	} else {
1942 		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
1943 		g_journal_copy_start(sc);
1944 	}
1945 }
1946 
1947 /*
1948  * Wait for requests.
1949  * If we have requests in the current queue, flush them after 3 seconds from the
1950  * last flush. This way we don't wait forever (or for a journal switch) before
1951  * storing partially filled records in the journal.
1952  */
1953 static void
1954 g_journal_wait(struct g_journal_softc *sc, time_t last_write)
1955 {
1956 	int error, timeout;
1957 
1958 	GJ_DEBUG(3, "%s: enter", __func__);
1959 	if (sc->sc_current_count == 0) {
1960 		if (g_journal_debug < 2)
1961 			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
1962 		else {
1963 			/*
1964 			 * If we have debug turned on, show number of elements
1965 			 * in various queues.
1966 			 */
1967 			for (;;) {
1968 				error = msleep(sc, &sc->sc_mtx, PRIBIO,
1969 				    "gj:work", hz * 3);
1970 				if (error == 0) {
1971 					mtx_unlock(&sc->sc_mtx);
1972 					break;
1973 				}
1974 				GJ_DEBUG(3, "Report: current count=%d",
1975 				    sc->sc_current_count);
1976 				GJ_DEBUG(3, "Report: flush count=%d",
1977 				    sc->sc_flush_count);
1978 				GJ_DEBUG(3, "Report: flush in progress=%d",
1979 				    sc->sc_flush_in_progress);
1980 				GJ_DEBUG(3, "Report: copy in progress=%d",
1981 				    sc->sc_copy_in_progress);
1982 				GJ_DEBUG(3, "Report: delayed=%d",
1983 				    sc->sc_delayed_count);
1984 			}
1985 		}
1986 		GJ_DEBUG(3, "%s: exit 1", __func__);
1987 		return;
1988 	}
1989 
1990 	/*
1991 	 * Flush even partially filled records every 3 seconds.
1992 	 */
1993 	timeout = (last_write + 3 - time_second) * hz;
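	/*
	 * 'timeout' is the number of ticks left until 3 seconds have passed
	 * since the last write; zero or negative means we are already overdue.
	 */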
1994 	if (timeout <= 0) {
1995 		mtx_unlock(&sc->sc_mtx);
1996 		g_journal_flush(sc);
1997 		g_journal_flush_send(sc);
1998 		GJ_DEBUG(3, "%s: exit 2", __func__);
1999 		return;
2000 	}
2001 	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
2002 	if (error == EWOULDBLOCK)
2003 		g_journal_flush_send(sc);
2004 	GJ_DEBUG(3, "%s: exit 3", __func__);
2005 }
2006 
2007 /*
2008  * Worker thread.
2009  */
2010 static void
2011 g_journal_worker(void *arg)
2012 {
2013 	struct g_journal_softc *sc;
2014 	struct g_geom *gp;
2015 	struct g_provider *pp;
2016 	struct bio *bp;
2017 	time_t last_write;
2018 	int type;
2019 
2020 	thread_lock(curthread);
2021 	sched_prio(curthread, PRIBIO);
2022 	thread_unlock(curthread);
2023 
2024 	sc = arg;
2025 	type = 0;	/* gcc */
2026 
2027 	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
2028 		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
2029 		g_journal_initialize(sc);
2030 	} else {
2031 		g_journal_sync(sc);
2032 	}
2033 	/*
2034 	 * Check if we can use BIO_FLUSH.
2035 	 */
2036 	sc->sc_bio_flush = 0;
2037 	if (g_io_flush(sc->sc_jconsumer) == 0) {
2038 		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
2039 		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2040 		    sc->sc_jconsumer->provider->name);
2041 	} else {
2042 		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2043 		    sc->sc_jconsumer->provider->name);
2044 	}
2045 	if (sc->sc_jconsumer != sc->sc_dconsumer) {
2046 		if (g_io_flush(sc->sc_dconsumer) == 0) {
2047 			sc->sc_bio_flush |= GJ_FLUSH_DATA;
2048 			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
2049 			    sc->sc_dconsumer->provider->name);
2050 		} else {
2051 			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
2052 			    sc->sc_dconsumer->provider->name);
2053 		}
2054 	}
2055 
2056 	gp = sc->sc_geom;
2057 	g_topology_lock();
2058 	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
2059 	pp->mediasize = sc->sc_mediasize;
2060 	/*
2061 	 * There could be a problem when the data and journal providers
2062 	 * have different sector sizes, but such a scenario is prevented at
2063 	 * journal creation time.
2064 	 */
2065 	pp->sectorsize = sc->sc_sectorsize;
2066 	g_error_provider(pp, 0);
2067 	g_topology_unlock();
2068 	last_write = time_second;
2069 
2070 	if (sc->sc_rootmount != NULL) {
2071 		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2072 		root_mount_rel(sc->sc_rootmount);
2073 		sc->sc_rootmount = NULL;
2074 	}
2075 
2076 	for (;;) {
2077 		/* Get first request from the queue. */
2078 		mtx_lock(&sc->sc_mtx);
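		/*
		 * The back queue (copy and journal flush completions) is
		 * served before regular I/O requests.
		 */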
2079 		bp = bioq_first(&sc->sc_back_queue);
2080 		if (bp != NULL)
2081 			type = (bp->bio_cflags & GJ_BIO_MASK);
2082 		if (bp == NULL) {
2083 			bp = bioq_first(&sc->sc_regular_queue);
2084 			if (bp != NULL)
2085 				type = GJ_BIO_REGULAR;
2086 		}
2087 		if (bp == NULL) {
2088 try_switch:
2089 			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
2090 			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
2091 				if (sc->sc_current_count > 0) {
2092 					mtx_unlock(&sc->sc_mtx);
2093 					g_journal_flush(sc);
2094 					g_journal_flush_send(sc);
2095 					continue;
2096 				}
2097 				if (sc->sc_flush_in_progress > 0)
2098 					goto sleep;
2099 				if (sc->sc_copy_in_progress > 0)
2100 					goto sleep;
2101 			}
2102 			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
2103 				mtx_unlock(&sc->sc_mtx);
2104 				g_journal_switch(sc);
2105 				wakeup(&sc->sc_journal_copying);
2106 				continue;
2107 			}
2108 			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
2109 				GJ_DEBUG(1, "Shutting down worker "
2110 				    "thread for %s.", gp->name);
2111 				sc->sc_worker = NULL;
2112 				wakeup(&sc->sc_worker);
2113 				mtx_unlock(&sc->sc_mtx);
2114 				kproc_exit(0);
2115 			}
2116 sleep:
2117 			g_journal_wait(sc, last_write);
2118 			continue;
2119 		}
2120 		/*
2121 		 * If we're in the middle of a switch, we need to delay all new
2122 		 * write requests until it's done.
2123 		 */
2124 		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
2125 		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
2126 			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
2127 			goto try_switch;
2128 		}
2129 		if (type == GJ_BIO_REGULAR)
2130 			bioq_remove(&sc->sc_regular_queue, bp);
2131 		else
2132 			bioq_remove(&sc->sc_back_queue, bp);
2133 		mtx_unlock(&sc->sc_mtx);
2134 		switch (type) {
2135 		case GJ_BIO_REGULAR:
2136 			/* Regular request. */
2137 			switch (bp->bio_cmd) {
2138 			case BIO_READ:
2139 				g_journal_read(sc, bp, bp->bio_offset,
2140 				    bp->bio_offset + bp->bio_length);
2141 				break;
2142 			case BIO_WRITE:
2143 				last_write = time_second;
2144 				g_journal_add_request(sc, bp);
2145 				g_journal_flush_send(sc);
2146 				break;
2147 			default:
2148 				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2149 			}
2150 			break;
2151 		case GJ_BIO_COPY:
2152 			switch (bp->bio_cmd) {
2153 			case BIO_READ:
2154 				if (g_journal_copy_read_done(bp))
2155 					g_journal_copy_send(sc);
2156 				break;
2157 			case BIO_WRITE:
2158 				g_journal_copy_write_done(bp);
2159 				g_journal_copy_send(sc);
2160 				break;
2161 			default:
2162 				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
2163 			}
2164 			break;
2165 		case GJ_BIO_JOURNAL:
2166 			g_journal_flush_done(bp);
2167 			g_journal_flush_send(sc);
2168 			break;
2169 		case GJ_BIO_READ:
2170 		default:
2171 			panic("Invalid bio (%d).", type);
2172 		}
2173 	}
2174 }
2175 
2176 static void
2177 g_journal_destroy_event(void *arg, int flags __unused)
2178 {
2179 	struct g_journal_softc *sc;
2180 
2181 	g_topology_assert();
2182 	sc = arg;
2183 	g_journal_destroy(sc);
2184 }
2185 
2186 static void
2187 g_journal_timeout(void *arg)
2188 {
2189 	struct g_journal_softc *sc;
2190 
2191 	sc = arg;
2192 	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
2193 	    sc->sc_geom->name);
2194 	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
2195 }
2196 
2197 static struct g_geom *
2198 g_journal_create(struct g_class *mp, struct g_provider *pp,
2199     const struct g_journal_metadata *md)
2200 {
2201 	struct g_journal_softc *sc;
2202 	struct g_geom *gp;
2203 	struct g_consumer *cp;
2204 	int error;
2205 
2206 	sc = NULL;	/* gcc */
2207 
2208 	g_topology_assert();
2209 	/*
2210 	 * There are two possibilities:
2211 	 * 1. Data and both journals are on the same provider.
2212 	 * 2. Data and journals are all on separate providers.
2213 	 */
2214 	/* Look for journal device with the same ID. */
2215 	LIST_FOREACH(gp, &mp->geom, geom) {
2216 		sc = gp->softc;
2217 		if (sc == NULL)
2218 			continue;
2219 		if (sc->sc_id == md->md_id)
2220 			break;
2221 	}
2222 	if (gp == NULL)
2223 		sc = NULL;
2224 	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2225 		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2226 		return (NULL);
2227 	}
2228 	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2229 		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2230 		return (NULL);
2231 	}
2232 	if (md->md_type & GJ_TYPE_DATA) {
2233 		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2234 		    pp->name);
2235 	}
2236 	if (md->md_type & GJ_TYPE_JOURNAL) {
2237 		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2238 		    pp->name);
2239 	}
2240 
2241 	if (sc == NULL) {
2242 		/* Action geom. */
2243 		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2244 		sc->sc_id = md->md_id;
2245 		sc->sc_type = 0;
2246 		sc->sc_flags = 0;
2247 		sc->sc_worker = NULL;
2248 
2249 		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2250 		gp->start = g_journal_start;
2251 		gp->orphan = g_journal_orphan;
2252 		gp->access = g_journal_access;
2253 		gp->softc = sc;
2254 		gp->flags |= G_GEOM_VOLATILE_BIO;
2255 		sc->sc_geom = gp;
2256 
2257 		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2258 
2259 		bioq_init(&sc->sc_back_queue);
2260 		bioq_init(&sc->sc_regular_queue);
2261 		bioq_init(&sc->sc_delayed_queue);
2262 		sc->sc_delayed_count = 0;
2263 		sc->sc_current_queue = NULL;
2264 		sc->sc_current_count = 0;
2265 		sc->sc_flush_queue = NULL;
2266 		sc->sc_flush_count = 0;
2267 		sc->sc_flush_in_progress = 0;
2268 		sc->sc_copy_queue = NULL;
2269 		sc->sc_copy_in_progress = 0;
2270 		sc->sc_inactive.jj_queue = NULL;
2271 		sc->sc_active.jj_queue = NULL;
2272 
2273 		sc->sc_rootmount = root_mount_hold("GJOURNAL");
2274 		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2275 
2276 		callout_init(&sc->sc_callout, 1);
2277 		if (md->md_type != GJ_TYPE_COMPLETE) {
2278 			/*
2279 			 * Journal and data are on separate providers.
2280 			 * At this point we have only one of them.
2281 			 * We set up a timeout in case the other part does not
2282 			 * appear, so we won't wait forever.
2283 			 */
2284 			callout_reset(&sc->sc_callout, 5 * hz,
2285 			    g_journal_timeout, sc);
2286 		}
2287 	}
2288 
2289 	/* Remember type of the data provider. */
2290 	if (md->md_type & GJ_TYPE_DATA)
2291 		sc->sc_orig_type = md->md_type;
2292 	sc->sc_type |= md->md_type;
2293 	cp = NULL;
2294 
2295 	if (md->md_type & GJ_TYPE_DATA) {
2296 		if (md->md_flags & GJ_FLAG_CLEAN)
2297 			sc->sc_flags |= GJF_DEVICE_CLEAN;
2298 		if (md->md_flags & GJ_FLAG_CHECKSUM)
2299 			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
2300 		cp = g_new_consumer(gp);
2301 		error = g_attach(cp, pp);
2302 		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2303 		    pp->name, error));
2304 		error = g_access(cp, 1, 1, 1);
2305 		if (error != 0) {
2306 			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
2307 			    error);
2308 			g_journal_destroy(sc);
2309 			return (NULL);
2310 		}
2311 		sc->sc_dconsumer = cp;
2312 		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
2313 		sc->sc_sectorsize = pp->sectorsize;
2314 		sc->sc_jstart = md->md_jstart;
2315 		sc->sc_jend = md->md_jend;
2316 		if (md->md_provider[0] != '\0')
2317 			sc->sc_flags |= GJF_DEVICE_HARDCODED;
2318 		sc->sc_journal_offset = md->md_joffset;
2319 		sc->sc_journal_id = md->md_jid;
2320 		sc->sc_journal_previous_id = md->md_jid;
2321 	}
2322 	if (md->md_type & GJ_TYPE_JOURNAL) {
2323 		if (cp == NULL) {
2324 			cp = g_new_consumer(gp);
2325 			error = g_attach(cp, pp);
2326 			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
2327 			    pp->name, error));
2328 			error = g_access(cp, 1, 1, 1);
2329 			if (error != 0) {
2330 				GJ_DEBUG(0, "Cannot access %s (error=%d).",
2331 				    pp->name, error);
2332 				g_journal_destroy(sc);
2333 				return (NULL);
2334 			}
2335 		} else {
2336 			/*
2337 			 * The journal is on the same provider as the data, which
2338 			 * means that the data provider ends where the journal starts.
2339 			 */
2340 			sc->sc_mediasize = md->md_jstart;
2341 		}
2342 		sc->sc_jconsumer = cp;
2343 	}
2344 
2345 	/* Start switcher kproc if needed. */
2346 	if (g_journal_switcher_proc == NULL)
2347 		g_journal_start_switcher(mp);
2348 
2349 	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
2350 		/* Journal is not complete yet. */
2351 		return (gp);
2352 	} else {
2353 		/* Journal complete, cancel timeout. */
2354 		callout_drain(&sc->sc_callout);
2355 	}
2356 
2357 	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
2358 	    "g_journal %s", sc->sc_name);
2359 	if (error != 0) {
2360 		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
2361 		    sc->sc_name);
2362 		g_journal_destroy(sc);
2363 		return (NULL);
2364 	}
2365 
2366 	return (gp);
2367 }
2368 
2369 static void
2370 g_journal_destroy_consumer(void *arg, int flags __unused)
2371 {
2372 	struct g_consumer *cp;
2373 
2374 	g_topology_assert();
2375 	cp = arg;
2376 	g_detach(cp);
2377 	g_destroy_consumer(cp);
2378 }
2379 
2380 static int
2381 g_journal_destroy(struct g_journal_softc *sc)
2382 {
2383 	struct g_geom *gp;
2384 	struct g_provider *pp;
2385 	struct g_consumer *cp;
2386 
2387 	g_topology_assert();
2388 
2389 	if (sc == NULL)
2390 		return (ENXIO);
2391 
2392 	gp = sc->sc_geom;
2393 	pp = LIST_FIRST(&gp->provider);
2394 	if (pp != NULL) {
2395 		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
2396 			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
2397 			    pp->name, pp->acr, pp->acw, pp->ace);
2398 			return (EBUSY);
2399 		}
2400 		g_error_provider(pp, ENXIO);
2401 
2402 		g_journal_flush(sc);
2403 		g_journal_flush_send(sc);
2404 		g_journal_switch(sc);
2405 	}
2406 
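	/*
	 * Mark the device for destruction; the worker thread notices
	 * GJF_DEVICE_DESTROY, clears sc_worker and exits, which we wait
	 * for below.
	 */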
2407 	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);
2408 
2409 	g_topology_unlock();
2410 
2411 	if (sc->sc_rootmount != NULL) {
2412 		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
2413 		root_mount_rel(sc->sc_rootmount);
2414 		sc->sc_rootmount = NULL;
2415 	}
2416 
2417 	callout_drain(&sc->sc_callout);
2418 	mtx_lock(&sc->sc_mtx);
2419 	wakeup(sc);
2420 	while (sc->sc_worker != NULL)
2421 		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
2422 	mtx_unlock(&sc->sc_mtx);
2423 
2424 	if (pp != NULL) {
2425 		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
2426 		g_journal_metadata_update(sc);
2427 		g_topology_lock();
2428 		g_wither_provider(pp, ENXIO);
2429 	} else {
2430 		g_topology_lock();
2431 	}
2432 	mtx_destroy(&sc->sc_mtx);
2433 
2434 	if (sc->sc_current_count != 0) {
2435 		GJ_DEBUG(0, "Warning! Number of current requests %d.",
2436 		    sc->sc_current_count);
2437 	}
2438 
2439 	gp->softc = NULL;
2440 	LIST_FOREACH(cp, &gp->consumer, consumer) {
2441 		if (cp->acr + cp->acw + cp->ace > 0)
2442 			g_access(cp, -1, -1, -1);
2443 		/*
2444 		 * We keep all consumers open for writing, so if we detached
2445 		 * and destroyed the consumer here, we would get the providers
2446 		 * for tasting and the journal would be started again.
2447 		 * Sending an event here prevents this from happening.
2448 		 */
2449 		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
2450 	}
2451 	g_wither_geom(gp, ENXIO);
2452 	free(sc, M_JOURNAL);
2453 	return (0);
2454 }
2455 
2456 static void
2457 g_journal_taste_orphan(struct g_consumer *cp)
2458 {
2459 
2460 	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2461 	    cp->provider->name));
2462 }
2463 
2464 static struct g_geom *
2465 g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2466 {
2467 	struct g_journal_metadata md;
2468 	struct g_consumer *cp;
2469 	struct g_geom *gp;
2470 	int error;
2471 
2472 	g_topology_assert();
2473 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2474 	GJ_DEBUG(2, "Tasting %s.", pp->name);
2475 	if (pp->geom->class == mp)
2476 		return (NULL);
2477 
2478 	gp = g_new_geomf(mp, "journal:taste");
2479 	/* This orphan function should never be called. */
2480 	gp->orphan = g_journal_taste_orphan;
2481 	cp = g_new_consumer(gp);
2482 	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2483 	error = g_attach(cp, pp);
2484 	if (error == 0) {
2485 		error = g_journal_metadata_read(cp, &md);
2486 		g_detach(cp);
2487 	}
2488 	g_destroy_consumer(cp);
2489 	g_destroy_geom(gp);
2490 	if (error != 0)
2491 		return (NULL);
2492 	gp = NULL;
2493 
2494 	if (md.md_provider[0] != '\0' &&
2495 	    !g_compare_names(md.md_provider, pp->name))
2496 		return (NULL);
2497 	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
2498 		return (NULL);
2499 	if (g_journal_debug >= 2)
2500 		journal_metadata_dump(&md);
2501 
2502 	gp = g_journal_create(mp, pp, &md);
2503 	return (gp);
2504 }
2505 
2506 static struct g_journal_softc *
2507 g_journal_find_device(struct g_class *mp, const char *name)
2508 {
2509 	struct g_journal_softc *sc;
2510 	struct g_geom *gp;
2511 	struct g_provider *pp;
2512 
2513 	if (strncmp(name, _PATH_DEV, 5) == 0)
2514 		name += 5;
2515 	LIST_FOREACH(gp, &mp->geom, geom) {
2516 		sc = gp->softc;
2517 		if (sc == NULL)
2518 			continue;
2519 		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2520 			continue;
2521 		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2522 			continue;
2523 		pp = LIST_FIRST(&gp->provider);
2524 		if (strcmp(sc->sc_name, name) == 0)
2525 			return (sc);
2526 		if (pp != NULL && strcmp(pp->name, name) == 0)
2527 			return (sc);
2528 	}
2529 	return (NULL);
2530 }
2531 
2532 static void
2533 g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
2534 {
2535 	struct g_journal_softc *sc;
2536 	const char *name;
2537 	char param[16];
2538 	int *nargs;
2539 	int error, i;
2540 
2541 	g_topology_assert();
2542 
2543 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
2544 	if (nargs == NULL) {
2545 		gctl_error(req, "No '%s' argument.", "nargs");
2546 		return;
2547 	}
2548 	if (*nargs <= 0) {
2549 		gctl_error(req, "Missing device(s).");
2550 		return;
2551 	}
2552 
2553 	for (i = 0; i < *nargs; i++) {
2554 		snprintf(param, sizeof(param), "arg%d", i);
2555 		name = gctl_get_asciiparam(req, param);
2556 		if (name == NULL) {
2557 			gctl_error(req, "No 'arg%d' argument.", i);
2558 			return;
2559 		}
2560 		sc = g_journal_find_device(mp, name);
2561 		if (sc == NULL) {
2562 			gctl_error(req, "No such device: %s.", name);
2563 			return;
2564 		}
2565 		error = g_journal_destroy(sc);
2566 		if (error != 0) {
2567 			gctl_error(req, "Cannot destroy device %s (error=%d).",
2568 			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
2569 			return;
2570 		}
2571 	}
2572 }
2573 
2574 static void
2575 g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
2576 {
2577 
2578 	g_topology_assert();
2579 	g_topology_unlock();
2580 	g_journal_sync_requested++;
2581 	wakeup(&g_journal_switcher_state);
2582 	while (g_journal_sync_requested > 0)
2583 		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
2584 	g_topology_lock();
2585 }
2586 
2587 static void
2588 g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
2589 {
2590 	uint32_t *version;
2591 
2592 	g_topology_assert();
2593 
2594 	version = gctl_get_paraml(req, "version", sizeof(*version));
2595 	if (version == NULL) {
2596 		gctl_error(req, "No '%s' argument.", "version");
2597 		return;
2598 	}
2599 	if (*version != G_JOURNAL_VERSION) {
2600 		gctl_error(req, "Userland and kernel parts are out of sync.");
2601 		return;
2602 	}
2603 
2604 	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
2605 		g_journal_ctl_destroy(req, mp);
2606 		return;
2607 	} else if (strcmp(verb, "sync") == 0) {
2608 		g_journal_ctl_sync(req, mp);
2609 		return;
2610 	}
2611 
2612 	gctl_error(req, "Unknown verb.");
2613 }
2614 
2615 static void
2616 g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2617     struct g_consumer *cp, struct g_provider *pp)
2618 {
2619 	struct g_journal_softc *sc;
2620 
2621 	g_topology_assert();
2622 
2623 	sc = gp->softc;
2624 	if (sc == NULL)
2625 		return;
2626 	if (pp != NULL) {
2627 		/* Nothing here. */
2628 	} else if (cp != NULL) {
2629 		int first = 1;
2630 
2631 		sbuf_printf(sb, "%s<Role>", indent);
2632 		if (cp == sc->sc_dconsumer) {
2633 			sbuf_cat(sb, "Data");
2634 			first = 0;
2635 		}
2636 		if (cp == sc->sc_jconsumer) {
2637 			if (!first)
2638 				sbuf_cat(sb, ",");
2639 			sbuf_cat(sb, "Journal");
2640 		}
2641 		sbuf_cat(sb, "</Role>\n");
2642 		if (cp == sc->sc_jconsumer) {
2643 			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
2644 			    (intmax_t)sc->sc_jstart);
2645 			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
2646 			    (intmax_t)sc->sc_jend);
2647 		}
2648 	} else {
2649 		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
2650 	}
2651 }
2652 
2653 static eventhandler_tag g_journal_event_shutdown = NULL;
2654 static eventhandler_tag g_journal_event_lowmem = NULL;
2655 
2656 static void
2657 g_journal_shutdown(void *arg, int howto __unused)
2658 {
2659 	struct g_class *mp;
2660 	struct g_geom *gp, *gp2;
2661 
2662 	if (KERNEL_PANICKED())
2663 		return;
2664 	mp = arg;
2665 	g_topology_lock();
2666 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
2667 		if (gp->softc == NULL)
2668 			continue;
2669 		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
2670 		g_journal_destroy(gp->softc);
2671 	}
2672 	g_topology_unlock();
2673 }
2674 
2675 /*
2676  * Free cached requests from the inactive queue in case of low memory.
2677  * We free GJ_FREE_AT_ONCE elements at once.
2678  */
2679 #define	GJ_FREE_AT_ONCE	4
2680 static void
2681 g_journal_lowmem(void *arg, int howto __unused)
2682 {
2683 	struct g_journal_softc *sc;
2684 	struct g_class *mp;
2685 	struct g_geom *gp;
2686 	struct bio *bp;
2687 	u_int nfree = GJ_FREE_AT_ONCE;
2688 
2689 	g_journal_stats_low_mem++;
2690 	mp = arg;
2691 	g_topology_lock();
2692 	LIST_FOREACH(gp, &mp->geom, geom) {
2693 		sc = gp->softc;
2694 		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
2695 			continue;
2696 		mtx_lock(&sc->sc_mtx);
2697 		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
2698 		    nfree--, bp = bp->bio_next) {
2699 			/*
2700 			 * It is safe to free bio_data here, because:
2701 			 * 1. If bio_data is NULL it will be read from the
2702 			 *    inactive journal.
2703 			 * 2. If bp is sent down, it is first removed from the
2704 			 *    inactive queue, so it's impossible to free the
2705 			 *    data from under an in-flight bio.
2706 			 * On the other hand, freeing elements from the active
2707 			 * queue is not safe.
2708 			 */
2709 			if (bp->bio_data != NULL) {
2710 				GJ_DEBUG(2, "Freeing data from %s.",
2711 				    sc->sc_name);
2712 				gj_free(bp->bio_data, bp->bio_length);
2713 				bp->bio_data = NULL;
2714 			}
2715 		}
2716 		mtx_unlock(&sc->sc_mtx);
2717 		if (nfree == 0)
2718 			break;
2719 	}
2720 	g_topology_unlock();
2721 }
2722 
2723 static void g_journal_switcher(void *arg);
2724 
2725 static void
2726 g_journal_init(struct g_class *mp)
2727 {
2728 
2729 	/* Pick a conservative value if the provided value is unusable. */
2730 	if (g_journal_cache_divisor <= 0 ||
2731 	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
2732 		g_journal_cache_divisor = 5;
2733 	}
2734 	if (g_journal_cache_limit > 0) {
2735 		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
2736 		g_journal_cache_low =
2737 		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
2738 	}
2739 	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
2740 	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
2741 	if (g_journal_event_shutdown == NULL)
2742 		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
2743 	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
2744 	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
2745 	if (g_journal_event_lowmem == NULL)
2746 		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
2747 }
2748 
2749 static void
2750 g_journal_fini(struct g_class *mp)
2751 {
2752 
2753 	if (g_journal_event_shutdown != NULL) {
2754 		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
2755 		    g_journal_event_shutdown);
2756 	}
2757 	if (g_journal_event_lowmem != NULL)
2758 		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
2759 	if (g_journal_switcher_proc != NULL)
2760 		g_journal_stop_switcher();
2761 }
2762 
2763 DECLARE_GEOM_CLASS(g_journal_class, g_journal);
2764 
2765 static const struct g_journal_desc *
2766 g_journal_find_desc(const char *fstype)
2767 {
2768 	const struct g_journal_desc *desc;
2769 	int i;
2770 
2771 	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
2772 	     desc = g_journal_filesystems[++i]) {
2773 		if (strcmp(desc->jd_fstype, fstype) == 0)
2774 			break;
2775 	}
2776 	return (desc);
2777 }
2778 
2779 static void
2780 g_journal_switch_wait(struct g_journal_softc *sc)
2781 {
2782 	struct bintime bt;
2783 
2784 	mtx_assert(&sc->sc_mtx, MA_OWNED);
2785 	if (g_journal_debug >= 2) {
2786 		if (sc->sc_flush_in_progress > 0) {
2787 			GJ_DEBUG(2, "%d requests flushing.",
2788 			    sc->sc_flush_in_progress);
2789 		}
2790 		if (sc->sc_copy_in_progress > 0) {
2791 			GJ_DEBUG(2, "%d requests copying.",
2792 			    sc->sc_copy_in_progress);
2793 		}
2794 		if (sc->sc_flush_count > 0) {
2795 			GJ_DEBUG(2, "%d requests to flush.",
2796 			    sc->sc_flush_count);
2797 		}
2798 		if (sc->sc_delayed_count > 0) {
2799 			GJ_DEBUG(2, "%d requests delayed.",
2800 			    sc->sc_delayed_count);
2801 		}
2802 	}
2803 	g_journal_stats_switches++;
2804 	if (sc->sc_copy_in_progress > 0)
2805 		g_journal_stats_wait_for_copy++;
2806 	GJ_TIMER_START(1, &bt);
2807 	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2808 	sc->sc_flags |= GJF_DEVICE_SWITCH;
2809 	wakeup(sc);
2810 	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
2811 		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
2812 		    "gj:switch", 0);
2813 	}
2814 	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
2815 }
2816 
2817 static void
2818 g_journal_do_switch(struct g_class *classp)
2819 {
2820 	struct g_journal_softc *sc;
2821 	const struct g_journal_desc *desc;
2822 	struct g_geom *gp;
2823 	struct mount *mp;
2824 	struct bintime bt;
2825 	char *mountpoint;
2826 	int error, save;
2827 
2828 	g_topology_lock();
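	/*
	 * First pass: mark every complete, non-destroyed journal as a
	 * switch candidate (GJF_DEVICE_BEFORE_SWITCH).
	 */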
2829 	LIST_FOREACH(gp, &classp->geom, geom) {
2830 		sc = gp->softc;
2831 		if (sc == NULL)
2832 			continue;
2833 		if (sc->sc_flags & GJF_DEVICE_DESTROY)
2834 			continue;
2835 		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
2836 			continue;
2837 		mtx_lock(&sc->sc_mtx);
2838 		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
2839 		mtx_unlock(&sc->sc_mtx);
2840 	}
2841 	g_topology_unlock();
2842 
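	/*
	 * Second pass: for every mounted, writable, journaled file system,
	 * sync it, suspend writes, mark it clean and switch its journal.
	 */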
2843 	mtx_lock(&mountlist_mtx);
2844 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2845 		if (mp->mnt_gjprovider == NULL)
2846 			continue;
2847 		if (mp->mnt_flag & MNT_RDONLY)
2848 			continue;
2849 		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
2850 		if (desc == NULL)
2851 			continue;
2852 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
2853 			continue;
2854 		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */
2855 
2856 		g_topology_lock();
2857 		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
2858 		g_topology_unlock();
2859 
2860 		if (sc == NULL) {
2861 			GJ_DEBUG(0, "Cannot find journal geom for %s.",
2862 			    mp->mnt_gjprovider);
2863 			goto next;
2864 		} else if (JEMPTY(sc)) {
2865 			mtx_lock(&sc->sc_mtx);
2866 			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
2867 			mtx_unlock(&sc->sc_mtx);
2868 			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
2869 			goto next;
2870 		}
2871 
2872 		mountpoint = mp->mnt_stat.f_mntonname;
2873 
2874 		error = vn_start_write(NULL, &mp, V_WAIT);
2875 		if (error != 0) {
2876 			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
2877 			    mountpoint, error);
2878 			goto next;
2879 		}
2880 
2881 		save = curthread_pflags_set(TDP_SYNCIO);
2882 
2883 		GJ_TIMER_START(1, &bt);
2884 		vfs_periodic(mp, MNT_NOWAIT);
2885 		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);
2886 
2887 		GJ_TIMER_START(1, &bt);
2888 		error = VFS_SYNC(mp, MNT_NOWAIT);
2889 		if (error == 0)
2890 			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
2891 		else {
2892 			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
2893 			    mountpoint, error);
2894 		}
2895 
2896 		curthread_pflags_restore(save);
2897 
2898 		vn_finished_write(mp);
2899 
2900 		if (error != 0)
2901 			goto next;
2902 
2903 		/*
2904 		 * Send BIO_FLUSH before freezing the file system, so it can be
2905 		 * faster after the freeze.
2906 		 */
2907 		GJ_TIMER_START(1, &bt);
2908 		g_journal_flush_cache(sc);
2909 		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);
2910 
2911 		GJ_TIMER_START(1, &bt);
2912 		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
2913 		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
2914 		if (error != 0) {
2915 			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
2916 			    mountpoint, error);
2917 			goto next;
2918 		}
2919 
2920 		error = desc->jd_clean(mp);
2921 		if (error != 0)
2922 			goto next;
2923 
2924 		mtx_lock(&sc->sc_mtx);
2925 		g_journal_switch_wait(sc);
2926 		mtx_unlock(&sc->sc_mtx);
2927 
2928 		vfs_write_resume(mp, 0);
2929 next:
2930 		mtx_lock(&mountlist_mtx);
2931 		vfs_unbusy(mp);
2932 	}
2933 	mtx_unlock(&mountlist_mtx);
2934 
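	/*
	 * Finally, switch any journals that are still marked
	 * GJF_DEVICE_BEFORE_SWITCH, i.e. those not handled by the mount
	 * loop above.
	 */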
2935 	sc = NULL;
2936 	for (;;) {
2937 		g_topology_lock();
2938 		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
2939 			sc = gp->softc;
2940 			if (sc == NULL)
2941 				continue;
2942 			mtx_lock(&sc->sc_mtx);
2943 			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
2944 			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
2945 			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
2946 				break;
2947 			}
2948 			mtx_unlock(&sc->sc_mtx);
2949 			sc = NULL;
2950 		}
2951 		g_topology_unlock();
2952 		if (sc == NULL)
2953 			break;
2954 		mtx_assert(&sc->sc_mtx, MA_OWNED);
2955 		g_journal_switch_wait(sc);
2956 		mtx_unlock(&sc->sc_mtx);
2957 	}
2958 }
2959 
2960 static void
2961 g_journal_start_switcher(struct g_class *mp)
2962 {
2963 	int error __diagused;
2964 
2965 	g_topology_assert();
2966 	MPASS(g_journal_switcher_proc == NULL);
2967 	g_journal_switcher_state = GJ_SWITCHER_WORKING;
2968 	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
2969 	    0, 0, "g_journal switcher");
2970 	KASSERT(error == 0, ("Cannot create switcher thread."));
2971 }
2972 
2973 static void
2974 g_journal_stop_switcher(void)
2975 {
2976 	g_topology_assert();
2977 	MPASS(g_journal_switcher_proc != NULL);
2978 	g_journal_switcher_state = GJ_SWITCHER_DIE;
2979 	wakeup(&g_journal_switcher_state);
2980 	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
2981 		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
2982 	GJ_DEBUG(1, "Switcher died.");
2983 	g_journal_switcher_proc = NULL;
2984 }
2985 
2986 /*
2987  * TODO: Kill switcher thread on last geom destruction?
2988  */
2989 static void
2990 g_journal_switcher(void *arg)
2991 {
2992 	struct g_class *mp;
2993 	struct bintime bt;
2994 	int error;
2995 
2996 	mp = arg;
2997 	curthread->td_pflags |= TDP_NORUNNINGBUF;
2998 	for (;;) {
2999 		g_journal_switcher_wokenup = 0;
3000 		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
3001 		    g_journal_switch_time * hz);
3002 		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
3003 			g_journal_switcher_state = GJ_SWITCHER_DIED;
3004 			GJ_DEBUG(1, "Switcher exiting.");
3005 			wakeup(&g_journal_switcher_state);
3006 			kproc_exit(0);
3007 		}
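		/*
		 * A zero return from tsleep() means we were woken up before
		 * the timeout; with no sync request pending this was the
		 * out-of-cache wakeup that forces a switch.
		 */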
3008 		if (error == 0 && g_journal_sync_requested == 0) {
3009 			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
3010 			    "limit=%jd).", (intmax_t)g_journal_cache_used,
3011 			    (intmax_t)g_journal_cache_limit);
3012 		}
3013 		GJ_TIMER_START(1, &bt);
3014 		g_journal_do_switch(mp);
3015 		GJ_TIMER_STOP(1, &bt, "Entire switch time");
3016 		if (g_journal_sync_requested > 0) {
3017 			g_journal_sync_requested = 0;
3018 			wakeup(&g_journal_sync_requested);
3019 		}
3020 	}
3021 }
3022