xref: /illumos-gate/usr/src/uts/common/fs/ufs/lufs.c (revision 0dee7919e2f2a6479d16b370af93747b9416b242)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/systm.h>
30 #include <sys/types.h>
31 #include <sys/vnode.h>
32 #include <sys/buf.h>
33 #include <sys/errno.h>
34 #include <sys/fssnap_if.h>
35 #include <sys/fs/ufs_inode.h>
36 #include <sys/fs/ufs_filio.h>
37 #include <sys/sysmacros.h>
38 #include <sys/modctl.h>
39 #include <sys/fs/ufs_log.h>
40 #include <sys/fs/ufs_bio.h>
41 #include <sys/fs/ufs_fsdir.h>
42 #include <sys/debug.h>
43 #include <sys/atomic.h>
44 #include <sys/kmem.h>
45 #include <sys/inttypes.h>
46 #include <sys/vfs.h>
47 #include <sys/mntent.h>
48 #include <sys/conf.h>
49 #include <sys/param.h>
50 #include <sys/kstat.h>
51 #include <sys/cmn_err.h>
52 
static kmutex_t	log_mutex;	/* general purpose log layer lock */
kmutex_t	ml_scan;	/* Scan thread synchronization */
kcondvar_t	ml_scan_cv;	/* Scan thread synchronization */

/* kmem caches for log layer private structures (initialized elsewhere) */
struct kmem_cache	*lufs_sv;
struct kmem_cache	*lufs_bp;

/* Tunables */
uint_t		ldl_maxlogsize	= LDL_MAXLOGSIZE;	/* max log size (bytes) */
uint_t		ldl_minlogsize	= LDL_MINLOGSIZE;	/* min log size (bytes) */
uint32_t	ldl_divisor	= LDL_DIVISOR;	/* default log = fs_size/divisor */
uint32_t	ldl_mintransfer	= LDL_MINTRANSFER;	/* min log I/O transfer */
uint32_t	ldl_maxtransfer	= LDL_MAXTRANSFER;	/* max log I/O transfer */
uint32_t	ldl_minbufsize	= LDL_MINBUFSIZE;

/* usec ident of the most recently initialized log head; kept unique */
uint32_t	last_loghead_ident = 0;
69 
/*
 * Logging delta and roll statistics
 *
 * One named kstat per delta type, split into deltas recorded and
 * deltas rolled back to the master device.  The raw counts are kept
 * in the delta_stats[]/roll_stats[] arrays below, indexed by delta
 * type; NOTE(review): presumably they are copied into these named
 * kstats at kstat snapshot time -- confirm against the kstat update
 * routine, which is not in view here.
 */
struct delta_kstats {
	kstat_named_t ds_superblock_deltas;
	kstat_named_t ds_bitmap_deltas;
	kstat_named_t ds_suminfo_deltas;
	kstat_named_t ds_allocblk_deltas;
	kstat_named_t ds_ab0_deltas;
	kstat_named_t ds_dir_deltas;
	kstat_named_t ds_inode_deltas;
	kstat_named_t ds_fbiwrite_deltas;
	kstat_named_t ds_quota_deltas;
	kstat_named_t ds_shadow_deltas;

	kstat_named_t ds_superblock_rolled;
	kstat_named_t ds_bitmap_rolled;
	kstat_named_t ds_suminfo_rolled;
	kstat_named_t ds_allocblk_rolled;
	kstat_named_t ds_ab0_rolled;
	kstat_named_t ds_dir_rolled;
	kstat_named_t ds_inode_rolled;
	kstat_named_t ds_fbiwrite_rolled;
	kstat_named_t ds_quota_rolled;
	kstat_named_t ds_shadow_rolled;
} dkstats = {
	{ "superblock_deltas",	KSTAT_DATA_UINT64 },
	{ "bitmap_deltas",	KSTAT_DATA_UINT64 },
	{ "suminfo_deltas",	KSTAT_DATA_UINT64 },
	{ "allocblk_deltas",	KSTAT_DATA_UINT64 },
	{ "ab0_deltas",		KSTAT_DATA_UINT64 },
	{ "dir_deltas",		KSTAT_DATA_UINT64 },
	{ "inode_deltas",	KSTAT_DATA_UINT64 },
	{ "fbiwrite_deltas",	KSTAT_DATA_UINT64 },
	{ "quota_deltas",	KSTAT_DATA_UINT64 },
	{ "shadow_deltas",	KSTAT_DATA_UINT64 },

	{ "superblock_rolled",	KSTAT_DATA_UINT64 },
	{ "bitmap_rolled",	KSTAT_DATA_UINT64 },
	{ "suminfo_rolled",	KSTAT_DATA_UINT64 },
	{ "allocblk_rolled",	KSTAT_DATA_UINT64 },
	{ "ab0_rolled",		KSTAT_DATA_UINT64 },
	{ "dir_rolled",		KSTAT_DATA_UINT64 },
	{ "inode_rolled",	KSTAT_DATA_UINT64 },
	{ "fbiwrite_rolled",	KSTAT_DATA_UINT64 },
	{ "quota_rolled",	KSTAT_DATA_UINT64 },
	{ "shadow_rolled",	KSTAT_DATA_UINT64 }
};

/* raw per-delta-type counters, indexed by delta type (0 .. DT_MAX-1) */
uint64_t delta_stats[DT_MAX];
uint64_t roll_stats[DT_MAX];
121 
/*
 * General logging kstats
 *	Counts of master-device vs log-device reads/writes; individual
 *	fields are bumped inline throughout this file (e.g. ls_lreads
 *	in lufs_read_strategy below).
 */
struct logstats logstats = {
	{ "master_reads",		KSTAT_DATA_UINT64 },
	{ "master_writes",		KSTAT_DATA_UINT64 },
	{ "log_reads_inmem",		KSTAT_DATA_UINT64 },
	{ "log_reads",			KSTAT_DATA_UINT64 },
	{ "log_writes",			KSTAT_DATA_UINT64 },
	{ "log_master_reads",		KSTAT_DATA_UINT64 },
	{ "log_roll_reads",		KSTAT_DATA_UINT64 },
	{ "log_roll_writes",		KSTAT_DATA_UINT64 }
};
135 
/*
 * b_iodone routine for synchronous I/O issued by the log layer:
 * wake the thread blocked in trans_not_wait() on the buffer's
 * b_io semaphore.  Always returns 0.
 */
int
trans_not_done(struct buf *cb)
{
	sema_v(&cb->b_io);
	return (0);
}
142 
/*
 * Busy-wait for buffer I/O completion.  Used only while panicking,
 * when blocking on a semaphore/biowait is no longer safe.
 */
static void
trans_wait_panic(struct buf *cb)
{
	while ((cb->b_flags & B_DONE) == 0)
		drv_usecwait(10);
}
149 
/*
 * Wait for an I/O started with b_iodone == trans_not_done to finish.
 * Returns the buffer's error status (geterror()).
 */
int
trans_not_wait(struct buf *cb)
{
	/*
	 * In case of panic, busy wait for completion
	 */
	if (panicstr)
		trans_wait_panic(cb);
	else
		sema_p(&cb->b_io);

	return (geterror(cb));
}
163 
/*
 * Wait for a normally-issued (biodone-completed) buffer.
 * Returns the biowait() error status.
 */
int
trans_wait(struct buf *cb)
{
	/*
	 * In case of panic, busy wait for completion first; biowait()
	 * then just collects the final status.
	 */
	if (panicstr)
		trans_wait_panic(cb);
	return (biowait(cb));
}
174 
175 static void
176 setsum(int32_t *sp, int32_t *lp, int nb)
177 {
178 	int32_t csum = 0;
179 
180 	*sp = 0;
181 	nb /= sizeof (int32_t);
182 	while (nb--)
183 		csum += *lp++;
184 	*sp = csum;
185 }
186 
187 static int
188 checksum(int32_t *sp, int32_t *lp, int nb)
189 {
190 	int32_t ssum = *sp;
191 
192 	setsum(sp, lp, nb);
193 	if (ssum != *sp) {
194 		*sp = ssum;
195 		return (0);
196 	}
197 	return (1);
198 }
199 
/*
 * Tear down all in-core logging state for a file system.
 *
 *	ufsvfsp	- incore ufs vfs; vfs_log is cleared on return
 *
 * No-op if logging was never snarfed.  Assumes no new transactions can
 * start (caller holds the fs write locked, is unmounting, or is in the
 * lufs_enable() error path).  The teardown order below is significant.
 */
void
lufs_unsnarf(ufsvfs_t *ufsvfsp)
{
	ml_unit_t *ul;
	mt_map_t *mtm;

	ul = ufsvfsp->vfs_log;
	if (ul == NULL)
		return;

	mtm = ul->un_logmap;

	/*
	 * Wait for a pending top_issue_sync which is
	 * dispatched (via taskq_dispatch()) but hasn't completed yet.
	 */

	mutex_enter(&mtm->mtm_lock);

	while (mtm->mtm_taskq_sync_count != 0) {
		cv_wait(&mtm->mtm_cv, &mtm->mtm_lock);
	}

	mutex_exit(&mtm->mtm_lock);

	/* Roll committed transactions to the master device */
	logmap_roll_dev(ul);

	/* Kill the roll thread */
	logmap_kill_roll(ul);

	/* release saved allocation (extent) info */
	if (ul->un_ebp)
		kmem_free(ul->un_ebp, ul->un_nbeb);

	/* release circular bufs */
	free_cirbuf(&ul->un_rdbuf);
	free_cirbuf(&ul->un_wrbuf);

	/* release maps */
	if (ul->un_logmap)
		ul->un_logmap = map_put(ul->un_logmap);
	if (ul->un_deltamap)
		ul->un_deltamap = map_put(ul->un_deltamap);
	if (ul->un_matamap)
		ul->un_matamap = map_put(ul->un_matamap);

	mutex_destroy(&ul->un_log_mutex);
	mutex_destroy(&ul->un_state_mutex);

	/* release state buffer MUST BE LAST!! (contains our ondisk data) */
	if (ul->un_bp)
		brelse(ul->un_bp);
	kmem_free(ul, sizeof (*ul));

	ufsvfsp->vfs_log = NULL;
}
257 
/*
 * Read the on-disk log structures and build the in-core logging state
 * ("snarf" the log).
 *
 *	ufsvfsp	- incore ufs vfs; vfs_log is set on success
 *	fs	- current superblock.  Passed explicitly because during
 *		  a remount the superblock hanging off ufsvfsp is stale.
 *	ronly	- non-zero for a read-only mount: a bad log is tolerated
 *		  and rolling is disabled (LDL_NOROLL)
 *
 * Returns 0 on success; EIO on I/O or log-state verification failure,
 * ENODEV on a bad extent-block checksum, EDOM on a malformed extent
 * block (e.g. all-zero log blocks).
 */
int
lufs_snarf(ufsvfs_t *ufsvfsp, struct fs *fs, int ronly)
{
	buf_t		*bp, *tbp;
	ml_unit_t	*ul;
	extent_block_t	*ebp;
	ic_extent_block_t  *nebp;
	size_t		nb;
	daddr_t		bno;	/* in disk blocks */
	int		i;

	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);

	/*
	 * Get the allocation table
	 *	During a remount the superblock pointed to by the ufsvfsp
	 *	is out of date.  Hence the need for the ``new'' superblock
	 *	pointer, fs, passed in as a parameter.
	 */
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, logbtodb(fs, fs->fs_logbno),
	    fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}
	ebp = (void *)bp->b_un.b_addr;
	if (!checksum(&ebp->chksum, (int32_t *)bp->b_un.b_addr,
		fs->fs_bsize)) {
		brelse(bp);
		return (ENODEV);
	}

	/*
	 * It is possible to get log blocks with all zeros.
	 * We should also check for nextents to be zero in such case.
	 */
	if (ebp->type != LUFS_EXTENTS || ebp->nextents == 0) {
		brelse(bp);
		return (EDOM);
	}
	/*
	 * Put allocation into memory.  This requires conversion between
	 * on the ondisk format of the extent (type extent_t) and the
	 * in-core format of the extent (type ic_extent_t).  The
	 * difference is the in-core form of the extent block stores
	 * the physical offset of the extent in disk blocks, which
	 * can require more than a 32-bit field.
	 */
	nb = (size_t)(sizeof (ic_extent_block_t) +
			((ebp->nextents - 1) * sizeof (ic_extent_t)));
	nebp = kmem_alloc(nb, KM_SLEEP);
	nebp->ic_nextents = ebp->nextents;
	nebp->ic_nbytes = ebp->nbytes;
	nebp->ic_nextbno = ebp->nextbno;
	for (i = 0; i < ebp->nextents; i++) {
		nebp->ic_extents[i].ic_lbno = ebp->extents[i].lbno;
		nebp->ic_extents[i].ic_nbno = ebp->extents[i].nbno;
		nebp->ic_extents[i].ic_pbno =
		    logbtodb(fs, ebp->extents[i].pbno);
	}
	brelse(bp);

	/*
	 * Get the log state
	 *	The state lives in the first sector of the first extent;
	 *	a second copy is kept in the following sector (written by
	 *	lufs_initialize()), so fall back to bno + 1 on error.
	 */
	bno = nebp->ic_extents[0].ic_pbno;
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno, DEV_BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno + 1, DEV_BSIZE);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			kmem_free(nebp, nb);
			return (EIO);
		}
	}

	/*
	 * Put ondisk struct into an anonymous buffer
	 *	This buffer will contain the memory for the ml_odunit struct
	 *	(both sector copies), and is kept for the life of the log
	 *	as ul->un_bp.
	 */
	tbp = ngeteblk(dbtob(LS_SECTORS));
	tbp->b_edev = bp->b_edev;
	tbp->b_dev = bp->b_dev;
	tbp->b_blkno = bno;
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr, DEV_BSIZE);
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr + DEV_BSIZE, DEV_BSIZE);
	bp->b_flags |= (B_STALE | B_AGE);
	brelse(bp);
	bp = tbp;

	/*
	 * Verify the log state
	 *
	 * read/only mounts w/bad logs are allowed.  umount will
	 * eventually roll the bad log until the first IO error.
	 * fsck will then repair the file system.
	 *
	 * read/write mounts with bad logs are not allowed.
	 *
	 */
	ul = (ml_unit_t *)kmem_zalloc(sizeof (*ul), KM_SLEEP);
	bcopy(bp->b_un.b_addr, &ul->un_ondisk, sizeof (ml_odunit_t));
	if ((ul->un_chksum != ul->un_head_ident + ul->un_tail_ident) ||
	    (ul->un_version != LUFS_VERSION_LATEST) ||
	    (!ronly && ul->un_badlog)) {
		kmem_free(ul, sizeof (*ul));
		brelse(bp);
		kmem_free(nebp, nb);
		return (EIO);
	}
	/*
	 * Initialize the incore-only fields
	 */
	if (ronly)
		ul->un_flags |= LDL_NOROLL;
	ul->un_bp = bp;
	ul->un_ufsvfs = ufsvfsp;
	ul->un_dev = ufsvfsp->vfs_dev;
	ul->un_ebp = nebp;
	ul->un_nbeb = nb;
	ul->un_maxresv = btodb(ul->un_logsize) * LDL_USABLE_BSIZE;
	ul->un_deltamap = map_get(ul, deltamaptype, DELTAMAP_NHASH);
	ul->un_logmap = map_get(ul, logmaptype, LOGMAP_NHASH);
	if (ul->un_debug & MT_MATAMAP)
		ul->un_matamap = map_get(ul, matamaptype, DELTAMAP_NHASH);
	mutex_init(&ul->un_log_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ul->un_state_mutex, NULL, MUTEX_DEFAULT, NULL);
	ufsvfsp->vfs_log = ul;

	/* remember the state of the log before the log scan */
	logmap_logscan(ul);

	/*
	 * Error during scan
	 *
	 * If this is a read/only mount; ignore the error.
	 * At a later time umount/fsck will repair the fs.
	 *
	 */
	if (ul->un_flags & LDL_ERROR) {
		if (!ronly) {
			lufs_unsnarf(ufsvfsp);
			return (EIO);
		}
		ul->un_flags &= ~LDL_ERROR;
	}
	if (!ronly)
		logmap_start_roll(ul);
	return (0);
}
410 
/*
 * Write the initial on-disk log state (ml_odunit) for a fresh log.
 *
 *	ufsvfsp	- incore ufs vfs
 *	bno	- disk block of the state area (start of first extent)
 *	nb	- total log space in bytes (state area + circular log)
 *	flp	- user's fiolog request; nbytes_actual is recorded as
 *		  od_requestsize
 *
 * Two identical copies of the state are written, one per sector, so
 * lufs_snarf() can fall back to the second copy if the first fails.
 * Returns 0 or EIO.
 */
static int
lufs_initialize(
	ufsvfs_t *ufsvfsp,
	daddr_t bno,
	size_t nb,
	struct fiolog *flp)
{
	ml_odunit_t	*ud, *ud2;
	buf_t		*bp;
	struct timeval	tv;

	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);
	ASSERT(nb >= ldl_minlogsize);

	bp = UFS_GETBLK(ufsvfsp, ufsvfsp->vfs_dev, bno, dbtob(LS_SECTORS));
	bzero(bp->b_un.b_addr, bp->b_bcount);

	ud = (void *)bp->b_un.b_addr;
	ud->od_version = LUFS_VERSION_LATEST;
	/* clamp transfer size to [ldl_mintransfer, ldl_maxtransfer] */
	ud->od_maxtransfer = MIN(ufsvfsp->vfs_iotransz, ldl_maxtransfer);
	if (ud->od_maxtransfer < ldl_mintransfer)
		ud->od_maxtransfer = ldl_mintransfer;
	ud->od_devbsize = DEV_BSIZE;

	ud->od_requestsize = flp->nbytes_actual;
	ud->od_statesize = dbtob(LS_SECTORS);
	ud->od_logsize = nb - ud->od_statesize;

	ud->od_statebno = INT32_C(0);

	/*
	 * Pick a unique head ident: bump the usec value if it would
	 * collide with the previous log head created on this system.
	 */
	uniqtime(&tv);
	if (tv.tv_usec == last_loghead_ident) {
		tv.tv_usec++;
	}
	last_loghead_ident = tv.tv_usec;
	ud->od_head_ident = tv.tv_usec;
	ud->od_tail_ident = ud->od_head_ident;
	ud->od_chksum = ud->od_head_ident + ud->od_tail_ident;

	/* empty log: head and tail both start at beginning-of-log */
	ud->od_bol_lof = dbtob(ud->od_statebno) + ud->od_statesize;
	ud->od_eol_lof = ud->od_bol_lof + ud->od_logsize;
	ud->od_head_lof = ud->od_bol_lof;
	ud->od_tail_lof = ud->od_bol_lof;

	ASSERT(lufs_initialize_debug(ud));

	/* redundant second copy in the following sector */
	ud2 = (void *)(bp->b_un.b_addr + DEV_BSIZE);
	bcopy(ud, ud2, sizeof (*ud));

	UFS_BWRITE2(ufsvfsp, bp);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}
	brelse(bp);

	return (0);
}
470 
/*
 * Free log space
 *	Assumes the file system is write locked and is not logging
 *
 *	Clears fs_logbno in the superblock first (restoring it on a
 *	superblock write error), then frees every fragment described by
 *	the extent block plus the extent block itself, using a dummy
 *	in-core inode for the free() calls.  Returns 0 or an errno.
 */
static int
lufs_free(struct ufsvfs *ufsvfsp)
{
	int		error = 0, i, j;
	buf_t		*bp = NULL;
	extent_t	*ep;
	extent_block_t	*ebp;
	struct fs	*fs = ufsvfsp->vfs_fs;
	daddr_t		fno;
	int32_t		logbno;
	long		nfno;
	inode_t		*ip = NULL;
	char		clean;

	/*
	 * Nothing to free
	 */
	if (fs->fs_logbno == 0)
		return (0);

	/*
	 * Mark the file system as FSACTIVE and no log but honor the
	 * current value of fs_reclaim.  The reclaim thread could have
	 * been active when lufs_disable() was called and if fs_reclaim
	 * is reset to zero here it could lead to lost inodes.
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	mutex_enter(&ufsvfsp->vfs_lock);
	clean = fs->fs_clean;
	logbno = fs->fs_logbno;
	fs->fs_clean = FSACTIVE;
	fs->fs_logbno = INT32_C(0);
	ufs_sbwrite(ufsvfsp);
	mutex_exit(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
	if (ufsvfsp->vfs_bufp->b_flags & B_ERROR) {
		error = EIO;
		/* superblock write failed; restore the saved values */
		fs->fs_clean = clean;
		fs->fs_logbno = logbno;
		goto errout;
	}

	/*
	 * fetch the allocation block
	 *	superblock -> one block of extents -> log data
	 */
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, logbtodb(fs, logbno),
	    fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		error = EIO;
		goto errout;
	}

	/*
	 * Free up the allocated space (dummy inode needed for free())
	 */
	ip = ufs_alloc_inode(ufsvfsp, UFSROOTINO);
	ebp = (void *)bp->b_un.b_addr;
	for (i = 0, ep = &ebp->extents[0]; i < ebp->nextents; ++i, ++ep) {
		fno = logbtofrag(fs, ep->pbno);
		nfno = dbtofsb(fs, ep->nbno);
		for (j = 0; j < nfno; j += fs->fs_frag, fno += fs->fs_frag)
			free(ip, fno, fs->fs_bsize, 0);
	}
	/* finally free the extent block itself */
	free(ip, logbtofrag(fs, logbno), fs->fs_bsize, 0);
	brelse(bp);
	bp = NULL;

	/*
	 * Push the metadata dirtied during the allocations
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	sbupdate(ufsvfsp->vfs_vfs);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
	bflush(ufsvfsp->vfs_dev);
	error = bfinval(ufsvfsp->vfs_dev, 0);
	if (error)
		goto errout;

	/*
	 * Free the dummy inode
	 */
	ufs_free_inode(ip);

	return (0);

errout:
	/*
	 * Free up all resources
	 */
	if (bp)
		brelse(bp);
	if (ip)
		ufs_free_inode(ip);
	return (error);
}
571 
572 /*
573  * Allocate log space
574  *	Assumes the file system is write locked and is not logging
575  */
576 static int
577 lufs_alloc(struct ufsvfs *ufsvfsp, struct fiolog *flp, cred_t *cr)
578 {
579 	int		error = 0;
580 	buf_t		*bp = NULL;
581 	extent_t	*ep, *nep;
582 	extent_block_t	*ebp;
583 	struct fs	*fs = ufsvfsp->vfs_fs;
584 	daddr_t		fno;	/* in frags */
585 	daddr_t		bno;	/* in disk blocks */
586 	int32_t		logbno = INT32_C(0);	/* will be fs_logbno */
587 	struct inode	*ip = NULL;
588 	size_t		nb = flp->nbytes_actual;
589 	size_t		tb = 0;
590 
591 	/*
592 	 * Mark the file system as FSACTIVE
593 	 */
594 	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
595 	mutex_enter(&ufsvfsp->vfs_lock);
596 	fs->fs_clean = FSACTIVE;
597 	ufs_sbwrite(ufsvfsp);
598 	mutex_exit(&ufsvfsp->vfs_lock);
599 	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
600 
601 	/*
602 	 * Allocate the allocation block (need dummy shadow inode;
603 	 * we use a shadow inode so the quota sub-system ignores
604 	 * the block allocations.)
605 	 *	superblock -> one block of extents -> log data
606 	 */
607 	ip = ufs_alloc_inode(ufsvfsp, UFSROOTINO);
608 	ip->i_mode = IFSHAD;		/* make the dummy a shadow inode */
609 	rw_enter(&ip->i_contents, RW_WRITER);
610 	fno = contigpref(ufsvfsp, nb + fs->fs_bsize);
611 	error = alloc(ip, fno, fs->fs_bsize, &fno, cr);
612 	if (error)
613 		goto errout;
614 	bno = fsbtodb(fs, fno);
615 
616 	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno, fs->fs_bsize);
617 	if (bp->b_flags & B_ERROR) {
618 		error = EIO;
619 		goto errout;
620 	}
621 
622 	ebp = (void *)bp->b_un.b_addr;
623 	ebp->type = LUFS_EXTENTS;
624 	ebp->nextbno = UINT32_C(0);
625 	ebp->nextents = UINT32_C(0);
626 	ebp->chksum = INT32_C(0);
627 	if (fs->fs_magic == FS_MAGIC)
628 		logbno = bno;
629 	else
630 		logbno = dbtofsb(fs, bno);
631 
632 	/*
633 	 * Initialize the first extent
634 	 */
635 	ep = &ebp->extents[0];
636 	error = alloc(ip, fno + fs->fs_frag, fs->fs_bsize, &fno, cr);
637 	if (error)
638 		goto errout;
639 	bno = fsbtodb(fs, fno);
640 
641 	ep->lbno = UINT32_C(0);
642 	if (fs->fs_magic == FS_MAGIC)
643 		ep->pbno = (uint32_t)bno;
644 	else
645 		ep->pbno = (uint32_t)fno;
646 	ep->nbno = (uint32_t)fsbtodb(fs, fs->fs_frag);
647 	ebp->nextents = UINT32_C(1);
648 	tb = fs->fs_bsize;
649 	nb -= fs->fs_bsize;
650 
651 	while (nb) {
652 		error = alloc(ip, fno + fs->fs_frag, fs->fs_bsize, &fno, cr);
653 		if (error) {
654 			if (tb < ldl_minlogsize)
655 				goto errout;
656 			error = 0;
657 			break;
658 		}
659 		bno = fsbtodb(fs, fno);
660 		if ((daddr_t)((logbtodb(fs, ep->pbno) + ep->nbno) == bno))
661 			ep->nbno += (uint32_t)(fsbtodb(fs, fs->fs_frag));
662 		else {
663 			nep = ep + 1;
664 			if ((caddr_t)(nep + 1) >
665 			    (bp->b_un.b_addr + fs->fs_bsize)) {
666 				free(ip, fno, fs->fs_bsize, 0);
667 				break;
668 			}
669 			nep->lbno = ep->lbno + ep->nbno;
670 			if (fs->fs_magic == FS_MAGIC)
671 				nep->pbno = (uint32_t)bno;
672 			else
673 				nep->pbno = (uint32_t)fno;
674 			nep->nbno = (uint32_t)(fsbtodb(fs, fs->fs_frag));
675 			ebp->nextents++;
676 			ep = nep;
677 		}
678 		tb += fs->fs_bsize;
679 		nb -= fs->fs_bsize;
680 	}
681 	ebp->nbytes = (uint32_t)tb;
682 	setsum(&ebp->chksum, (int32_t *)bp->b_un.b_addr, fs->fs_bsize);
683 	UFS_BWRITE2(ufsvfsp, bp);
684 	if (bp->b_flags & B_ERROR) {
685 		error = EIO;
686 		goto errout;
687 	}
688 	/*
689 	 * Initialize the first two sectors of the log
690 	 */
691 	error = lufs_initialize(ufsvfsp, logbtodb(fs, ebp->extents[0].pbno),
692 	    tb, flp);
693 	if (error)
694 		goto errout;
695 
696 	/*
697 	 * We are done initializing the allocation block and the log
698 	 */
699 	brelse(bp);
700 	bp = NULL;
701 
702 	/*
703 	 * Update the superblock and push the dirty metadata
704 	 */
705 	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
706 	sbupdate(ufsvfsp->vfs_vfs);
707 	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
708 	bflush(ufsvfsp->vfs_dev);
709 	error = bfinval(ufsvfsp->vfs_dev, 1);
710 	if (error)
711 		goto errout;
712 	if (ufsvfsp->vfs_bufp->b_flags & B_ERROR) {
713 		error = EIO;
714 		goto errout;
715 	}
716 
717 	/*
718 	 * Everything is safely on disk; update log space pointer in sb
719 	 */
720 	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
721 	mutex_enter(&ufsvfsp->vfs_lock);
722 	fs->fs_logbno = (uint32_t)logbno;
723 	ufs_sbwrite(ufsvfsp);
724 	mutex_exit(&ufsvfsp->vfs_lock);
725 	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
726 
727 	/*
728 	 * Free the dummy inode
729 	 */
730 	rw_exit(&ip->i_contents);
731 	ufs_free_inode(ip);
732 
733 	/* inform user of real log size */
734 	flp->nbytes_actual = tb;
735 	return (0);
736 
737 errout:
738 	/*
739 	 * Free all resources
740 	 */
741 	if (bp)
742 		brelse(bp);
743 	if (logbno) {
744 		fs->fs_logbno = logbno;
745 		(void) lufs_free(ufsvfsp);
746 	}
747 	if (ip) {
748 		rw_exit(&ip->i_contents);
749 		ufs_free_inode(ip);
750 	}
751 	return (error);
752 }
753 
/*
 * Disable logging
 *
 *	vp	- any vnode in the file system
 *	flp	- fiolog result; most failures are reported through
 *		  flp->error (FIOLOG_*) with a 0 return value
 *
 * Write-locks the file system, tears down the in-core log state, and
 * frees the on-disk log space.  Returns an errno only when the lock
 * status cannot be queried or the final unlock fails (errout path).
 */
int
lufs_disable(vnode_t *vp, struct fiolog *flp)
{
	int		error = 0;
	inode_t		*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs = ufsvfsp->vfs_fs;
	struct lockfs	lf;
	struct ulockfs	*ulp;

	flp->error = FIOLOG_ENONE;

	/*
	 * Logging is already disabled; done
	 */
	if (fs->fs_logbno == 0 || ufsvfsp->vfs_log == NULL)
		return (0);

	/*
	 * Readonly file system
	 */
	if (fs->fs_ronly) {
		flp->error = FIOLOG_EROFS;
		return (0);
	}

	/*
	 * File system must be write locked to disable logging
	 */
	error = ufs_fiolfss(vp, &lf);
	if (error) {
		return (error);
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		return (0);
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		return (0);
	}

	/* recheck under the write lock; someone may have beaten us here */
	if (ufsvfsp->vfs_log == NULL || fs->fs_logbno == 0)
		goto errout;

	/*
	 * WE ARE COMMITTED TO DISABLING LOGGING PAST THIS POINT
	 */

	/*
	 * Disable logging:
	 * Suspend the reclaim thread and force the delete thread to exit.
	 *	When a nologging mount has completed there may still be
	 *	work for reclaim to do so just suspend this thread until
	 *	it's [deadlock-] safe for it to continue.  The delete
	 *	thread won't be needed as ufs_iinactive() calls
	 *	ufs_delete() when logging is disabled.
	 * Freeze and drain reader ops.
	 *	Commit any outstanding reader transactions (ufs_flush).
	 *	Set the ``unmounted'' bit in the ufstrans struct.
	 *	If debug, remove metadata from matamap.
	 *	Disable matamap processing.
	 *	NULL the trans ops table.
	 *	Free all of the incore structs related to logging.
	 * Allow reader ops.
	 */
	ufs_thread_suspend(&ufsvfsp->vfs_reclaim);
	ufs_thread_exit(&ufsvfsp->vfs_delete);

	vfs_lock_wait(ufsvfsp->vfs_vfs);
	ulp = &ufsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
	atomic_add_long(&ufs_quiesce_pend, 1);
	(void) ufs_quiesce(ulp);

	(void) ufs_flush(ufsvfsp->vfs_vfs);

	TRANS_MATA_UMOUNT(ufsvfsp);
	ufsvfsp->vfs_domatamap = 0;

	/*
	 * Free all of the incore structs
	 */
	(void) lufs_unsnarf(ufsvfsp);

	atomic_add_long(&ufs_quiesce_pend, -1);
	mutex_exit(&ulp->ul_lock);
	vfs_setmntopt(ufsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
	vfs_unlock(ufsvfsp->vfs_vfs);

	fs->fs_rolled = FS_ALL_ROLLED;
	ufsvfsp->vfs_nolog_si = 0;

	/*
	 * Free the log space and mark the superblock as FSACTIVE
	 */
	(void) lufs_free(ufsvfsp);

	/*
	 * Allow the reclaim thread to continue.
	 */
	ufs_thread_continue(&ufsvfsp->vfs_reclaim);

	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error)
		flp->error = FIOLOG_ENOULOCK;

	return (0);

errout:
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) ufs_fiolfs(vp, &lf, 1);
	return (error);
}
881 
/*
 * Enable logging
 *
 *	vp	- any vnode in the file system
 *	flp	- fiolog request; nbytes_requested is the desired log
 *		  size (0 selects a default based on ldl_divisor).
 *		  Results are reported through flp->error (FIOLOG_*)
 *		  and flp->nbytes_actual.
 *	cr	- credentials for the log space allocation
 *
 * Write-locks the file system, allocates and initializes the on-disk
 * log, snarfs the in-core state, and restarts the delete/reclaim
 * threads as if the fs had been mounted with logging.  If logging is
 * already enabled with a different log size, the existing log is
 * disabled first and the whole procedure restarts at "recheck".
 * Returns an errno only for lock-status failures.
 */
int
lufs_enable(struct vnode *vp, struct fiolog *flp, cred_t *cr)
{
	int		error;
	int		reclaim;
	inode_t		*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	ml_unit_t	*ul;
	struct lockfs	lf;
	struct ulockfs	*ulp;
	vfs_t		*vfsp = ufsvfsp->vfs_vfs;
	uint64_t	tmp_nbytes_actual;

	/*
	 * Check if logging is already enabled
	 */
	if (ufsvfsp->vfs_log) {
		flp->error = FIOLOG_ETRANS;
		/* for root ensure logging option is set */
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		return (0);
	}
	fs = ufsvfsp->vfs_fs;

	/*
	 * Come back here to recheck if we had to disable the log.
	 */
recheck:
	error = 0;
	reclaim = 0;
	flp->error = FIOLOG_ENONE;

	/*
	 * Adjust requested log size
	 *	0 means default (fs_size / ldl_divisor); the result is
	 *	clamped to [ldl_minlogsize, ldl_maxlogsize] and rounded
	 *	up to a file system block boundary.
	 */
	flp->nbytes_actual = flp->nbytes_requested;
	if (flp->nbytes_actual == 0) {
		tmp_nbytes_actual =
		    (((uint64_t)fs->fs_size) / ldl_divisor) << fs->fs_fshift;
		flp->nbytes_actual = (uint_t)MIN(tmp_nbytes_actual, INT_MAX);
	}
	flp->nbytes_actual = MAX(flp->nbytes_actual, ldl_minlogsize);
	flp->nbytes_actual = MIN(flp->nbytes_actual, ldl_maxlogsize);
	flp->nbytes_actual = blkroundup(fs, flp->nbytes_actual);

	/*
	 * logging is enabled and the log is the right size; done
	 */
	ul = ufsvfsp->vfs_log;
	if (ul && fs->fs_logbno && (flp->nbytes_actual == ul->un_requestsize))
			return (0);

	/*
	 * Readonly file system
	 */
	if (fs->fs_ronly) {
		flp->error = FIOLOG_EROFS;
		return (0);
	}

	/*
	 * File system must be write locked to enable logging
	 */
	error = ufs_fiolfss(vp, &lf);
	if (error) {
		return (error);
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		return (0);
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		return (0);
	}

	/*
	 * File system must be fairly consistent to enable logging
	 */
	if (fs->fs_clean != FSLOG &&
	    fs->fs_clean != FSACTIVE &&
	    fs->fs_clean != FSSTABLE &&
	    fs->fs_clean != FSCLEAN) {
		flp->error = FIOLOG_ECLEAN;
		goto unlockout;
	}

	/*
	 * A write-locked file system is only active if there are
	 * open deleted files; so remember to set FS_RECLAIM later.
	 */
	if (fs->fs_clean == FSACTIVE)
		reclaim = FS_RECLAIM;

	/*
	 * Logging is already enabled; must be changing the log's size
	 */
	if (fs->fs_logbno && ufsvfsp->vfs_log) {
		/*
		 * Before we can disable logging, we must give up our
		 * lock.  As a consequence of unlocking and disabling the
		 * log, the fs structure may change.  Because of this, when
		 * disabling is complete, we will go back to recheck to
		 * repeat all of the checks that we performed to get to
		 * this point.  Disabling sets fs->fs_logbno to 0, so this
		 * will not put us into an infinite loop.
		 */
		lf.lf_lock = LOCKFS_ULOCK;
		lf.lf_flags = 0;
		error = ufs_fiolfs(vp, &lf, 1);
		if (error) {
			flp->error = FIOLOG_ENOULOCK;
			return (0);
		}
		error = lufs_disable(vp, flp);
		if (error || (flp->error != FIOLOG_ENONE))
			return (0);
		goto recheck;
	}

	error = lufs_alloc(ufsvfsp, flp, cr);
	if (error)
		goto errout;

	/*
	 * Create all of the incore structs
	 */
	error = lufs_snarf(ufsvfsp, fs, 0);
	if (error)
		goto errout;

	/*
	 * DON'T ``GOTO ERROUT'' PAST THIS POINT
	 */

	/*
	 * Pretend we were just mounted with logging enabled
	 *	freeze and drain the file system of readers
	 *		Get the ops vector
	 *		If debug, record metadata locations with log subsystem
	 *		Start the delete thread
	 *		Start the reclaim thread, if necessary
	 *	Thaw readers
	 */
	vfs_lock_wait(vfsp);
	vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
	ulp = &ufsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
	atomic_add_long(&ufs_quiesce_pend, 1);
	(void) ufs_quiesce(ulp);

	TRANS_DOMATAMAP(ufsvfsp);
	TRANS_MATA_MOUNT(ufsvfsp);
	TRANS_MATA_SI(ufsvfsp, fs);
	ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);
	if (fs->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
		fs->fs_reclaim &= ~FS_RECLAIM;
		fs->fs_reclaim |=  FS_RECLAIMING;
		ufs_thread_start(&ufsvfsp->vfs_reclaim,
					ufs_thread_reclaim, vfsp);
	} else
		fs->fs_reclaim |= reclaim;

	atomic_add_long(&ufs_quiesce_pend, -1);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vfsp);

	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
		return (0);
	}

	/*
	 * There's nothing in the log yet (we've just allocated it)
	 * so directly write out the super block.
	 * Note, we have to force this sb out to disk
	 * (not just to the log) so that if we crash we know we are logging
	 */
	mutex_enter(&ufsvfsp->vfs_lock);
	fs->fs_clean = FSLOG;
	fs->fs_rolled = FS_NEED_ROLL; /* Mark the fs as unrolled */
	UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
	mutex_exit(&ufsvfsp->vfs_lock);

	return (0);

errout:
	(void) lufs_unsnarf(ufsvfsp);
	(void) lufs_free(ufsvfsp);
unlockout:
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) ufs_fiolfs(vp, &lf, 1);
	return (error);
}
1091 
/*
 * Read strategy for a logging file system.
 *
 *	ul	- in-core log unit
 *	bp	- read buffer (master-device block address/length)
 *
 * If the logmap has no deltas overlapping the request, the read goes
 * straight to the master device.  Otherwise the data is assembled in
 * the caller's buffer: a sync read from the master first (unless the
 * deltas cover the entire range), then the newer delta data is read
 * from the log on top of it via ldl_read().  Completion is signalled
 * with biodone(); errors are reported through B_ERROR/b_error.
 */
void
lufs_read_strategy(ml_unit_t *ul, buf_t *bp)
{
	mt_map_t	*logmap	= ul->un_logmap;
	offset_t	mof	= ldbtob(bp->b_blkno);
	off_t		nb	= bp->b_bcount;
	mapentry_t	*age;
	char		*va;
	int		(*saviodone)();
	int		entire_range;

	/*
	 * get a linked list of overlapping deltas
	 * returns with &mtm->mtm_rwlock held
	 */
	entire_range = logmap_list_get(logmap, mof, nb, &age);

	/*
	 * no overlapping deltas were found; read master
	 */
	if (age == NULL) {
		rw_exit(&logmap->mtm_rwlock);
		if (ul->un_flags & LDL_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
			biodone(bp);
		} else {
			ul->un_ufsvfs->vfs_iotstamp = lbolt;
			logstats.ls_lreads.value.ui64++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_INBLK, 1);
		}
		return;
	}

	va = bp_mapin_common(bp, VM_SLEEP);
	/*
	 * if necessary, sync read the data from master
	 *	errors are returned in bp
	 */
	if (!entire_range) {
		/* temporarily take over completion so we can wait here */
		saviodone = bp->b_iodone;
		bp->b_iodone = trans_not_done;
		logstats.ls_mreads.value.ui64++;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
		if (trans_not_wait(bp))
			ldl_seterror(ul, "Error reading master");
		bp->b_iodone = saviodone;
	}

	/*
	 * sync read the data from the log
	 *	errors are returned inline
	 */
	if (ldl_read(ul, va, mof, nb, age)) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}

	/*
	 * unlist the deltas
	 */
	logmap_list_put(logmap, age);

	/*
	 * all done
	 */
	if (ul->un_flags & LDL_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	biodone(bp);
}
1166 
/*
 * Strategy routine for writes on a logged file system.  If the buffer
 * overlaps deltas registered in the delta map, those deltas are moved
 * into the log map (destined for the log) and the buffer is completed
 * without touching the master device.  Otherwise the write is sent to
 * the master device (or through the snapshot driver when a snapshot
 * is active).
 */
void
lufs_write_strategy(ml_unit_t *ul, buf_t *bp)
{
	offset_t	mof	= ldbtob(bp->b_blkno);
	off_t		nb	= bp->b_bcount;
	char		*va;
	mapentry_t	*me;

	/* writes must be a multiple of the device block size */
	ASSERT((nb & DEV_BMASK) == 0);
	/* flag logmap activity -- NOTE(review): presumably consumed by the
	 * roll/idle logic; confirm against mtm_ref readers */
	ul->un_logmap->mtm_ref = 1;

	/*
	 * if there are deltas, move into log
	 */
	me = deltamap_remove(ul->un_deltamap, mof, nb);
	if (me) {

		va = bp_mapin_common(bp, VM_SLEEP);

		/* debug check: the range should be known metadata */
		ASSERT(((ul->un_debug & MT_WRITE_CHECK) == 0) ||
			(ul->un_matamap == NULL)||
			matamap_within(ul->un_matamap, mof, nb));

		/*
		 * move to logmap; with CRB (committed record buffer)
		 * support enabled the buffer contents are also handed over
		 */
		if (ufs_crb_enable) {
			logmap_add_buf(ul, va, mof, me,
			    bp->b_un.b_addr, nb);
		} else {
			logmap_add(ul, va, mof, me);
		}

		/* deltas are queued for the log; complete the buffer now */
		if (ul->un_flags & LDL_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
		}
		biodone(bp);
		return;
	}
	if (ul->un_flags & LDL_ERROR) {
		/* log unit is in an error state; fail the I/O */
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bp);
		return;
	}

	/*
	 * Check that we are not updating metadata, or if so then via B_PHYS.
	 */
	ASSERT((ul->un_matamap == NULL) ||
		!(matamap_overlap(ul->un_matamap, mof, nb) &&
		((bp->b_flags & B_PHYS) == 0)));

	/* record I/O activity and write to the master file system */
	ul->un_ufsvfs->vfs_iotstamp = lbolt;
	logstats.ls_lwrites.value.ui64++;

	/* If snapshots are enabled, write through the snapshot driver */
	if (ul->un_ufsvfs->vfs_snapshot)
		fssnap_strategy(&ul->un_ufsvfs->vfs_snapshot, bp);
	else
		(void) bdev_strategy(bp);

	lwp_stat_update(LWP_STAT_OUBLK, 1);
}
1232 
1233 void
1234 lufs_strategy(ml_unit_t *ul, buf_t *bp)
1235 {
1236 	if (bp->b_flags & B_READ)
1237 		lufs_read_strategy(ul, bp);
1238 	else
1239 		lufs_write_strategy(ul, bp);
1240 }
1241 
1242 /* ARGSUSED */
1243 static int
1244 delta_stats_update(kstat_t *ksp, int rw)
1245 {
1246 	if (rw == KSTAT_WRITE) {
1247 		delta_stats[DT_SB] = dkstats.ds_superblock_deltas.value.ui64;
1248 		delta_stats[DT_CG] = dkstats.ds_bitmap_deltas.value.ui64;
1249 		delta_stats[DT_SI] = dkstats.ds_suminfo_deltas.value.ui64;
1250 		delta_stats[DT_AB] = dkstats.ds_allocblk_deltas.value.ui64;
1251 		delta_stats[DT_ABZERO] = dkstats.ds_ab0_deltas.value.ui64;
1252 		delta_stats[DT_DIR] = dkstats.ds_dir_deltas.value.ui64;
1253 		delta_stats[DT_INODE] = dkstats.ds_inode_deltas.value.ui64;
1254 		delta_stats[DT_FBI] = dkstats.ds_fbiwrite_deltas.value.ui64;
1255 		delta_stats[DT_QR] = dkstats.ds_quota_deltas.value.ui64;
1256 		delta_stats[DT_SHAD] = dkstats.ds_shadow_deltas.value.ui64;
1257 
1258 		roll_stats[DT_SB] = dkstats.ds_superblock_rolled.value.ui64;
1259 		roll_stats[DT_CG] = dkstats.ds_bitmap_rolled.value.ui64;
1260 		roll_stats[DT_SI] = dkstats.ds_suminfo_rolled.value.ui64;
1261 		roll_stats[DT_AB] = dkstats.ds_allocblk_rolled.value.ui64;
1262 		roll_stats[DT_ABZERO] = dkstats.ds_ab0_rolled.value.ui64;
1263 		roll_stats[DT_DIR] = dkstats.ds_dir_rolled.value.ui64;
1264 		roll_stats[DT_INODE] = dkstats.ds_inode_rolled.value.ui64;
1265 		roll_stats[DT_FBI] = dkstats.ds_fbiwrite_rolled.value.ui64;
1266 		roll_stats[DT_QR] = dkstats.ds_quota_rolled.value.ui64;
1267 		roll_stats[DT_SHAD] = dkstats.ds_shadow_rolled.value.ui64;
1268 	} else {
1269 		dkstats.ds_superblock_deltas.value.ui64 = delta_stats[DT_SB];
1270 		dkstats.ds_bitmap_deltas.value.ui64 = delta_stats[DT_CG];
1271 		dkstats.ds_suminfo_deltas.value.ui64 = delta_stats[DT_SI];
1272 		dkstats.ds_allocblk_deltas.value.ui64 = delta_stats[DT_AB];
1273 		dkstats.ds_ab0_deltas.value.ui64 = delta_stats[DT_ABZERO];
1274 		dkstats.ds_dir_deltas.value.ui64 = delta_stats[DT_DIR];
1275 		dkstats.ds_inode_deltas.value.ui64 = delta_stats[DT_INODE];
1276 		dkstats.ds_fbiwrite_deltas.value.ui64 = delta_stats[DT_FBI];
1277 		dkstats.ds_quota_deltas.value.ui64 = delta_stats[DT_QR];
1278 		dkstats.ds_shadow_deltas.value.ui64 = delta_stats[DT_SHAD];
1279 
1280 		dkstats.ds_superblock_rolled.value.ui64 = roll_stats[DT_SB];
1281 		dkstats.ds_bitmap_rolled.value.ui64 = roll_stats[DT_CG];
1282 		dkstats.ds_suminfo_rolled.value.ui64 = roll_stats[DT_SI];
1283 		dkstats.ds_allocblk_rolled.value.ui64 = roll_stats[DT_AB];
1284 		dkstats.ds_ab0_rolled.value.ui64 = roll_stats[DT_ABZERO];
1285 		dkstats.ds_dir_rolled.value.ui64 = roll_stats[DT_DIR];
1286 		dkstats.ds_inode_rolled.value.ui64 = roll_stats[DT_INODE];
1287 		dkstats.ds_fbiwrite_rolled.value.ui64 = roll_stats[DT_FBI];
1288 		dkstats.ds_quota_rolled.value.ui64 = roll_stats[DT_QR];
1289 		dkstats.ds_shadow_rolled.value.ui64 = roll_stats[DT_SHAD];
1290 	}
1291 	return (0);
1292 }
1293 
1294 extern size_t ufs_crb_limit;
1295 extern int ufs_max_crb_divisor;
1296 
1297 void
1298 lufs_init(void)
1299 {
1300 	kstat_t *ksp;
1301 
1302 	/* Create kmem caches */
1303 	lufs_sv = kmem_cache_create("lufs_save", sizeof (lufs_save_t), 0,
1304 	    NULL, NULL, NULL, NULL, NULL, 0);
1305 	lufs_bp = kmem_cache_create("lufs_bufs", sizeof (lufs_buf_t), 0,
1306 	    NULL, NULL, NULL, NULL, NULL, 0);
1307 
1308 	mutex_init(&log_mutex, NULL, MUTEX_DEFAULT, NULL);
1309 
1310 	_init_top();
1311 
1312 	if (&bio_lufs_strategy != NULL)
1313 		bio_lufs_strategy = (void (*) (void *, buf_t *)) lufs_strategy;
1314 
1315 	/*
1316 	 * Initialise general logging and delta kstats
1317 	 */
1318 	ksp = kstat_create("ufs_log", 0, "logstats", "ufs", KSTAT_TYPE_NAMED,
1319 	    sizeof (logstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
1320 	if (ksp) {
1321 		ksp->ks_data = (void *) &logstats;
1322 		kstat_install(ksp);
1323 	}
1324 
1325 	ksp = kstat_create("ufs_log", 0, "deltastats", "ufs", KSTAT_TYPE_NAMED,
1326 	    sizeof (dkstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
1327 	if (ksp) {
1328 		ksp->ks_data = (void *) &dkstats;
1329 		ksp->ks_update = delta_stats_update;
1330 		kstat_install(ksp);
1331 	}
1332 
1333 	/*
1334 	 * Set up the maximum amount of kmem that the crbs (system wide)
1335 	 * can use.
1336 	 */
1337 	ufs_crb_limit = kmem_maxavail() / ufs_max_crb_divisor;
1338 }
1339