/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_filio.h>
#include <sys/sysmacros.h>
#include <sys/modctl.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_bio.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/debug.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/inttypes.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/conf.h>
#include <sys/param.h>
#include <sys/kstat.h>
#include <sys/cmn_err.h>

extern	kmutex_t	ufs_scan_lock;

static kmutex_t	log_mutex;	/* general purpose log layer lock */
kmutex_t	ml_scan;	/* scan thread synchronization */
kcondvar_t	ml_scan_cv;	/* scan thread synchronization */

struct kmem_cache	*lufs_sv;
struct kmem_cache	*lufs_bp;

/* Tunables */
uint_t		ldl_maxlogsize	= LDL_MAXLOGSIZE;
uint_t		ldl_minlogsize	= LDL_MINLOGSIZE;
uint32_t	ldl_divisor	= LDL_DIVISOR;
uint32_t	ldl_mintransfer	= LDL_MINTRANSFER;
uint32_t	ldl_maxtransfer	= LDL_MAXTRANSFER;
uint32_t	ldl_minbufsize	= LDL_MINBUFSIZE;

uint32_t	last_loghead_ident = 0;

/*
 * Logging delta and roll statistics
 */
struct delta_kstats {
	kstat_named_t ds_superblock_deltas;
	kstat_named_t ds_bitmap_deltas;
	kstat_named_t ds_suminfo_deltas;
	kstat_named_t ds_allocblk_deltas;
	kstat_named_t ds_ab0_deltas;
	kstat_named_t ds_dir_deltas;
	kstat_named_t ds_inode_deltas;
	kstat_named_t ds_fbiwrite_deltas;
	kstat_named_t ds_quota_deltas;
	kstat_named_t ds_shadow_deltas;

	kstat_named_t ds_superblock_rolled;
	kstat_named_t ds_bitmap_rolled;
	kstat_named_t ds_suminfo_rolled;
	kstat_named_t ds_allocblk_rolled;
	kstat_named_t ds_ab0_rolled;
	kstat_named_t ds_dir_rolled;
	kstat_named_t ds_inode_rolled;
	kstat_named_t ds_fbiwrite_rolled;
	kstat_named_t ds_quota_rolled;
	kstat_named_t ds_shadow_rolled;
} dkstats = {
	{ "superblock_deltas",	KSTAT_DATA_UINT64 },
	{ "bitmap_deltas",	KSTAT_DATA_UINT64 },
	{ "suminfo_deltas",	KSTAT_DATA_UINT64 },
	{ "allocblk_deltas",	KSTAT_DATA_UINT64 },
	{ "ab0_deltas",		KSTAT_DATA_UINT64 },
	{ "dir_deltas",		KSTAT_DATA_UINT64 },
	{ "inode_deltas",	KSTAT_DATA_UINT64 },
	{ "fbiwrite_deltas",	KSTAT_DATA_UINT64 },
	{ "quota_deltas",	KSTAT_DATA_UINT64 },
	{ "shadow_deltas",	KSTAT_DATA_UINT64 },

	{ "superblock_rolled",	KSTAT_DATA_UINT64 },
	{ "bitmap_rolled",	KSTAT_DATA_UINT64 },
	{ "suminfo_rolled",	KSTAT_DATA_UINT64 },
	{ "allocblk_rolled",	KSTAT_DATA_UINT64 },
	{ "ab0_rolled",		KSTAT_DATA_UINT64 },
	{ "dir_rolled",		KSTAT_DATA_UINT64 },
	{ "inode_rolled",	KSTAT_DATA_UINT64 },
	{ "fbiwrite_rolled",	KSTAT_DATA_UINT64 },
	{ "quota_rolled",	KSTAT_DATA_UINT64 },
	{ "shadow_rolled",	KSTAT_DATA_UINT64 }
};

uint64_t delta_stats[DT_MAX];
uint64_t roll_stats[DT_MAX];

/*
 * General logging kstats
 */
struct logstats logstats = {
	{ "master_reads",		KSTAT_DATA_UINT64 },
	{ "master_writes",		KSTAT_DATA_UINT64 },
	{ "log_reads_inmem",		KSTAT_DATA_UINT64 },
	{ "log_reads",			KSTAT_DATA_UINT64 },
	{ "log_writes",			KSTAT_DATA_UINT64 },
	{ "log_master_reads",		KSTAT_DATA_UINT64 },
	{ "log_roll_reads",		KSTAT_DATA_UINT64 },
	{ "log_roll_writes",		KSTAT_DATA_UINT64 }
};

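/*
 * Synchronous-I/O helpers.  A caller that wants to wait for a buffer
 * without going through biodone() points b_iodone at trans_not_done(),
 * which simply posts the buffer's b_io semaphore, and then blocks in
 * trans_not_wait().  A typical sequence (as used by lufs_read_strategy()
 * below):
 *
 *	saviodone = bp->b_iodone;
 *	bp->b_iodone = trans_not_done;
 *	(void) bdev_strategy(bp);
 *	if (trans_not_wait(bp))
 *		ldl_seterror(ul, "Error reading master");
 *	bp->b_iodone = saviodone;
 *
 * When the system is panicking, both wait routines busy-wait on B_DONE
 * via trans_wait_panic() instead of blocking.
 */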
int
trans_not_done(struct buf *cb)
{
	sema_v(&cb->b_io);
	return (0);
}

static void
trans_wait_panic(struct buf *cb)
{
	while ((cb->b_flags & B_DONE) == 0)
		drv_usecwait(10);
}

int
trans_not_wait(struct buf *cb)
{
	/*
	 * In case of panic, busy wait for completion
	 */
	if (panicstr)
		trans_wait_panic(cb);
	else
		sema_p(&cb->b_io);

	return (geterror(cb));
}

int
trans_wait(struct buf *cb)
{
	/*
	 * In case of panic, busy wait for completion
	 */
	if (panicstr)
		trans_wait_panic(cb);
	return (biowait(cb));
}

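/*
 * The extent block written by lufs_alloc() is protected by a simple
 * additive checksum: the 32-bit sum of every word in the block,
 * computed with the checksum word itself zeroed.  checksum()
 * recomputes the sum in place and returns non-zero on a match; on a
 * mismatch it restores the stored value so the caller sees the block
 * unmodified.
 */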
static void
setsum(int32_t *sp, int32_t *lp, int nb)
{
	int32_t csum = 0;

	*sp = 0;
	nb /= sizeof (int32_t);
	while (nb--)
		csum += *lp++;
	*sp = csum;
}

static int
checksum(int32_t *sp, int32_t *lp, int nb)
{
	int32_t ssum = *sp;

	setsum(sp, lp, nb);
	if (ssum != *sp) {
		*sp = ssum;
		return (0);
	}
	return (1);
}

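/*
 * Tear down the incore logging state: wait out any top_issue_sync
 * taskq entries still in flight, roll committed deltas, stop the roll
 * thread, and free the maps, the circular buffers, and the state
 * buffer.  Callers that are delinking the log from a live file system
 * hold ufs_scan_lock around this call to keep ufs_scan_inodes() away.
 */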
void
lufs_unsnarf(ufsvfs_t *ufsvfsp)
{
	ml_unit_t *ul;
	mt_map_t *mtm;

	ul = ufsvfsp->vfs_log;
	if (ul == NULL)
		return;

	mtm = ul->un_logmap;

	/*
	 * Wait for a pending top_issue_sync which is
	 * dispatched (via taskq_dispatch()) but hasn't completed yet.
	 */
	mutex_enter(&mtm->mtm_lock);

	while (mtm->mtm_taskq_sync_count != 0) {
		cv_wait(&mtm->mtm_cv, &mtm->mtm_lock);
	}

	mutex_exit(&mtm->mtm_lock);

	/* Roll committed transactions */
	logmap_roll_dev(ul);

	/* Kill the roll thread */
	logmap_kill_roll(ul);

	/* release saved allocation info */
	if (ul->un_ebp)
		kmem_free(ul->un_ebp, ul->un_nbeb);

	/* release circular bufs */
	free_cirbuf(&ul->un_rdbuf);
	free_cirbuf(&ul->un_wrbuf);

	/* release maps */
	if (ul->un_logmap)
		ul->un_logmap = map_put(ul->un_logmap);
	if (ul->un_deltamap)
		ul->un_deltamap = map_put(ul->un_deltamap);
	if (ul->un_matamap)
		ul->un_matamap = map_put(ul->un_matamap);

	mutex_destroy(&ul->un_log_mutex);
	mutex_destroy(&ul->un_state_mutex);

	/* release state buffer MUST BE LAST!! (contains our ondisk data) */
	if (ul->un_bp)
		brelse(ul->un_bp);
	kmem_free(ul, sizeof (*ul));

	ufsvfsp->vfs_log = NULL;
}

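/*
 * Read the log's allocation (extent) block and on-disk state, validate
 * both, and construct the incore ml_unit_t for this file system.  The
 * log is then scanned so that committed deltas can be rolled.  Returns
 * an errno; on failure vfs_log is left NULL.
 */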
int
lufs_snarf(ufsvfs_t *ufsvfsp, struct fs *fs, int ronly)
{
	buf_t		*bp, *tbp;
	ml_unit_t	*ul;
	extent_block_t	*ebp;
	ic_extent_block_t  *nebp;
	size_t		nb;
	daddr_t		bno;	/* in disk blocks */
	int		i;

	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);

	/*
	 * Get the allocation table
	 *	During a remount the superblock pointed to by the ufsvfsp
	 *	is out of date.  Hence the need for the ``new'' superblock
	 *	pointer, fs, passed in as a parameter.
	 */
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, logbtodb(fs, fs->fs_logbno),
	    fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}
	ebp = (void *)bp->b_un.b_addr;
	if (!checksum(&ebp->chksum, (int32_t *)bp->b_un.b_addr,
	    fs->fs_bsize)) {
		brelse(bp);
		return (ENODEV);
	}

	/*
	 * It is possible to get log blocks with all zeros.
	 * We should also check for nextents being zero in such a case.
	 */
	if (ebp->type != LUFS_EXTENTS || ebp->nextents == 0) {
		brelse(bp);
		return (EDOM);
	}
	/*
	 * Put the allocation into memory.  This requires conversion
	 * between the on-disk format of the extent (type extent_t) and
	 * the in-core format of the extent (type ic_extent_t).  The
	 * difference is that the in-core form of the extent block stores
	 * the physical offset of the extent in disk blocks, which can
	 * require more than a 32-bit field.
	 */
	nb = (size_t)(sizeof (ic_extent_block_t) +
	    ((ebp->nextents - 1) * sizeof (ic_extent_t)));
	nebp = kmem_alloc(nb, KM_SLEEP);
	nebp->ic_nextents = ebp->nextents;
	nebp->ic_nbytes = ebp->nbytes;
	nebp->ic_nextbno = ebp->nextbno;
	for (i = 0; i < ebp->nextents; i++) {
		nebp->ic_extents[i].ic_lbno = ebp->extents[i].lbno;
		nebp->ic_extents[i].ic_nbno = ebp->extents[i].nbno;
		nebp->ic_extents[i].ic_pbno =
		    logbtodb(fs, ebp->extents[i].pbno);
	}
	brelse(bp);

	/*
	 * Get the log state
	 */
	bno = nebp->ic_extents[0].ic_pbno;
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno, DEV_BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno + 1, DEV_BSIZE);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			kmem_free(nebp, nb);
			return (EIO);
		}
	}

	/*
	 * Put the on-disk struct into an anonymous buffer
	 *	This buffer will contain the memory for the ml_odunit struct
	 */
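	/*
	 * Two identical copies of the state are kept in consecutive
	 * sectors; the read above falls back to bno + 1 when the first
	 * sector fails.
	 */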
	tbp = ngeteblk(dbtob(LS_SECTORS));
	tbp->b_edev = bp->b_edev;
	tbp->b_dev = bp->b_dev;
	tbp->b_blkno = bno;
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr, DEV_BSIZE);
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr + DEV_BSIZE, DEV_BSIZE);
	bp->b_flags |= (B_STALE | B_AGE);
	brelse(bp);
	bp = tbp;

	/*
	 * Verify the log state
	 *
	 * Read-only mounts with bad logs are allowed.  umount will
	 * eventually roll the bad log until the first I/O error;
	 * fsck will then repair the file system.
	 *
	 * Read/write mounts with bad logs are not allowed.
	 */
	ul = (ml_unit_t *)kmem_zalloc(sizeof (*ul), KM_SLEEP);
	bcopy(bp->b_un.b_addr, &ul->un_ondisk, sizeof (ml_odunit_t));
	if ((ul->un_chksum != ul->un_head_ident + ul->un_tail_ident) ||
	    (ul->un_version != LUFS_VERSION_LATEST) ||
	    (!ronly && ul->un_badlog)) {
		kmem_free(ul, sizeof (*ul));
		brelse(bp);
		kmem_free(nebp, nb);
		return (EIO);
	}
	/*
	 * Initialize the incore-only fields
	 */
	if (ronly)
		ul->un_flags |= LDL_NOROLL;
	ul->un_bp = bp;
	ul->un_ufsvfs = ufsvfsp;
	ul->un_dev = ufsvfsp->vfs_dev;
	ul->un_ebp = nebp;
	ul->un_nbeb = nb;
	ul->un_maxresv = btodb(ul->un_logsize) * LDL_USABLE_BSIZE;
	ul->un_deltamap = map_get(ul, deltamaptype, DELTAMAP_NHASH);
	ul->un_logmap = map_get(ul, logmaptype, LOGMAP_NHASH);
	if (ul->un_debug & MT_MATAMAP)
		ul->un_matamap = map_get(ul, matamaptype, DELTAMAP_NHASH);
	mutex_init(&ul->un_log_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ul->un_state_mutex, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Acquire the ufs_scan_lock before linking the mtm data
	 * structure so that ufs_sync() and ufs_update(), which call
	 * ufs_scan_inodes(), stay away while we are in the process of
	 * enabling/disabling logging.
	 */
	mutex_enter(&ufs_scan_lock);
	ufsvfsp->vfs_log = ul;

	/* remember the state of the log before the log scan */
	logmap_logscan(ul);
	mutex_exit(&ufs_scan_lock);

	/*
	 * Error during scan
	 *
	 * If this is a read-only mount, ignore the error.
	 * At a later time umount/fsck will repair the fs.
	 */
	if (ul->un_flags & LDL_ERROR) {
		if (!ronly) {
			/*
			 * Acquire the ufs_scan_lock before de-linking
			 * the mtm data structure so that ufs_sync() and
			 * ufs_update(), which call ufs_scan_inodes(),
			 * stay away while we are in the process of
			 * enabling/disabling logging.
			 */
			mutex_enter(&ufs_scan_lock);
			lufs_unsnarf(ufsvfsp);
			mutex_exit(&ufs_scan_lock);
			return (EIO);
		}
		ul->un_flags &= ~LDL_ERROR;
	}
	if (!ronly)
		logmap_start_roll(ul);
	return (0);
}

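/*
 * Write the initial on-disk log state for a freshly allocated log: two
 * identical copies of the ml_odunit_t, one per DEV_BSIZE sector, with a
 * fresh head/tail ident and an empty log (head == tail == bol).
 */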
static int
lufs_initialize(
	ufsvfs_t *ufsvfsp,
	daddr_t bno,
	size_t nb,
	struct fiolog *flp)
{
	ml_odunit_t	*ud, *ud2;
	buf_t		*bp;
	struct timeval	tv;

	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);
	ASSERT(nb >= ldl_minlogsize);

	bp = UFS_GETBLK(ufsvfsp, ufsvfsp->vfs_dev, bno, dbtob(LS_SECTORS));
	bzero(bp->b_un.b_addr, bp->b_bcount);

	ud = (void *)bp->b_un.b_addr;
	ud->od_version = LUFS_VERSION_LATEST;
	ud->od_maxtransfer = MIN(ufsvfsp->vfs_iotransz, ldl_maxtransfer);
	if (ud->od_maxtransfer < ldl_mintransfer)
		ud->od_maxtransfer = ldl_mintransfer;
	ud->od_devbsize = DEV_BSIZE;

	ud->od_requestsize = flp->nbytes_actual;
	ud->od_statesize = dbtob(LS_SECTORS);
	ud->od_logsize = nb - ud->od_statesize;

	ud->od_statebno = INT32_C(0);

	uniqtime(&tv);
	if (tv.tv_usec == last_loghead_ident) {
		tv.tv_usec++;
	}
	last_loghead_ident = tv.tv_usec;
	ud->od_head_ident = tv.tv_usec;
	ud->od_tail_ident = ud->od_head_ident;
	ud->od_chksum = ud->od_head_ident + ud->od_tail_ident;

	ud->od_bol_lof = dbtob(ud->od_statebno) + ud->od_statesize;
	ud->od_eol_lof = ud->od_bol_lof + ud->od_logsize;
	ud->od_head_lof = ud->od_bol_lof;
	ud->od_tail_lof = ud->od_bol_lof;

	ASSERT(lufs_initialize_debug(ud));

	ud2 = (void *)(bp->b_un.b_addr + DEV_BSIZE);
	bcopy(ud, ud2, sizeof (*ud));

	UFS_BWRITE2(ufsvfsp, bp);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}
	brelse(bp);

	return (0);
}

/*
 * Free log space
 *	Assumes the file system is write locked and is not logging
 */
static int
lufs_free(struct ufsvfs *ufsvfsp)
{
	int		error = 0, i, j;
	buf_t		*bp = NULL;
	extent_t	*ep;
	extent_block_t	*ebp;
	struct fs	*fs = ufsvfsp->vfs_fs;
	daddr_t		fno;
	int32_t		logbno;
	long		nfno;
	inode_t		*ip = NULL;
	char		clean;

	/*
	 * Nothing to free
	 */
	if (fs->fs_logbno == 0)
		return (0);

	/*
	 * Mark the file system as FSACTIVE and no log but honor the
	 * current value of fs_reclaim.  The reclaim thread could have
	 * been active when lufs_disable() was called and if fs_reclaim
	 * is reset to zero here it could lead to lost inodes.
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	mutex_enter(&ufsvfsp->vfs_lock);
	clean = fs->fs_clean;
	logbno = fs->fs_logbno;
	fs->fs_clean = FSACTIVE;
	fs->fs_logbno = INT32_C(0);
	ufs_sbwrite(ufsvfsp);
	mutex_exit(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
	if (ufsvfsp->vfs_bufp->b_flags & B_ERROR) {
		error = EIO;
		fs->fs_clean = clean;
		fs->fs_logbno = logbno;
		goto errout;
	}

	/*
	 * fetch the allocation block
	 *	superblock -> one block of extents -> log data
	 */
	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, logbtodb(fs, logbno),
	    fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		error = EIO;
		goto errout;
	}

	/*
	 * Free up the allocated space (dummy inode needed for free())
	 */
	ip = ufs_alloc_inode(ufsvfsp, UFSROOTINO);
	ebp = (void *)bp->b_un.b_addr;
	for (i = 0, ep = &ebp->extents[0]; i < ebp->nextents; ++i, ++ep) {
		fno = logbtofrag(fs, ep->pbno);
		nfno = dbtofsb(fs, ep->nbno);
		for (j = 0; j < nfno; j += fs->fs_frag, fno += fs->fs_frag)
			free(ip, fno, fs->fs_bsize, 0);
	}
	free(ip, logbtofrag(fs, logbno), fs->fs_bsize, 0);
	brelse(bp);
	bp = NULL;

	/*
	 * Push the metadata dirtied during the allocations
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	sbupdate(ufsvfsp->vfs_vfs);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
	bflush(ufsvfsp->vfs_dev);
	error = bfinval(ufsvfsp->vfs_dev, 0);
	if (error)
		goto errout;

	/*
	 * Free the dummy inode
	 */
	ufs_free_inode(ip);

	return (0);

errout:
	/*
	 * Free up all resources
	 */
	if (bp)
		brelse(bp);
	if (ip)
		ufs_free_inode(ip);
	return (error);
}

/*
 * Allocate log space
 *	Assumes the file system is write locked and is not logging
 */
static int
lufs_alloc(struct ufsvfs *ufsvfsp, struct fiolog *flp, cred_t *cr)
{
	int		error = 0;
	buf_t		*bp = NULL;
	extent_t	*ep, *nep;
	extent_block_t	*ebp;
	struct fs	*fs = ufsvfsp->vfs_fs;
	daddr_t		fno;	/* in frags */
	daddr_t		bno;	/* in disk blocks */
	int32_t		logbno = INT32_C(0);	/* will be fs_logbno */
	struct inode	*ip = NULL;
	size_t		nb = flp->nbytes_actual;
	size_t		tb = 0;

	/*
	 * Mark the file system as FSACTIVE
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	mutex_enter(&ufsvfsp->vfs_lock);
	fs->fs_clean = FSACTIVE;
	ufs_sbwrite(ufsvfsp);
	mutex_exit(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;

	/*
	 * Allocate the allocation block (need dummy shadow inode;
	 * we use a shadow inode so the quota sub-system ignores
	 * the block allocations.)
	 *	superblock -> one block of extents -> log data
	 */
	ip = ufs_alloc_inode(ufsvfsp, UFSROOTINO);
	ip->i_mode = IFSHAD;		/* make the dummy a shadow inode */
	rw_enter(&ip->i_contents, RW_WRITER);
	fno = contigpref(ufsvfsp, nb + fs->fs_bsize);
	error = alloc(ip, fno, fs->fs_bsize, &fno, cr);
	if (error)
		goto errout;
	bno = fsbtodb(fs, fno);

	bp = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, bno, fs->fs_bsize);
	if (bp->b_flags & B_ERROR) {
		error = EIO;
		goto errout;
	}

	ebp = (void *)bp->b_un.b_addr;
	ebp->type = LUFS_EXTENTS;
	ebp->nextbno = UINT32_C(0);
	ebp->nextents = UINT32_C(0);
	ebp->chksum = INT32_C(0);
	if (fs->fs_magic == FS_MAGIC)
		logbno = bno;
	else
		logbno = dbtofsb(fs, bno);

	/*
	 * Initialize the first extent
	 */
	ep = &ebp->extents[0];
	error = alloc(ip, fno + fs->fs_frag, fs->fs_bsize, &fno, cr);
	if (error)
		goto errout;
	bno = fsbtodb(fs, fno);

	ep->lbno = UINT32_C(0);
	if (fs->fs_magic == FS_MAGIC)
		ep->pbno = (uint32_t)bno;
	else
		ep->pbno = (uint32_t)fno;
	ep->nbno = (uint32_t)fsbtodb(fs, fs->fs_frag);
	ebp->nextents = UINT32_C(1);
	tb = fs->fs_bsize;
	nb -= fs->fs_bsize;

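	/*
	 * Allocate the remaining log space one file-system block at a
	 * time.  A block physically contiguous with the current extent
	 * just extends it; otherwise a new extent is started.  The loop
	 * ends early, without error, if allocation fails after at least
	 * ldl_minlogsize bytes have been obtained, or if the extent
	 * block fills up.
	 */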
	while (nb) {
		error = alloc(ip, fno + fs->fs_frag, fs->fs_bsize, &fno, cr);
		if (error) {
			if (tb < ldl_minlogsize)
				goto errout;
			error = 0;
			break;
		}
		bno = fsbtodb(fs, fno);
		if ((daddr_t)(logbtodb(fs, ep->pbno) + ep->nbno) == bno)
			ep->nbno += (uint32_t)(fsbtodb(fs, fs->fs_frag));
		else {
			nep = ep + 1;
			if ((caddr_t)(nep + 1) >
			    (bp->b_un.b_addr + fs->fs_bsize)) {
				free(ip, fno, fs->fs_bsize, 0);
				break;
			}
			nep->lbno = ep->lbno + ep->nbno;
			if (fs->fs_magic == FS_MAGIC)
				nep->pbno = (uint32_t)bno;
			else
				nep->pbno = (uint32_t)fno;
			nep->nbno = (uint32_t)(fsbtodb(fs, fs->fs_frag));
			ebp->nextents++;
			ep = nep;
		}
		tb += fs->fs_bsize;
		nb -= fs->fs_bsize;
	}
	ebp->nbytes = (uint32_t)tb;
	setsum(&ebp->chksum, (int32_t *)bp->b_un.b_addr, fs->fs_bsize);
	UFS_BWRITE2(ufsvfsp, bp);
	if (bp->b_flags & B_ERROR) {
		error = EIO;
		goto errout;
	}
	/*
	 * Initialize the first two sectors of the log
	 */
	error = lufs_initialize(ufsvfsp, logbtodb(fs, ebp->extents[0].pbno),
	    tb, flp);
	if (error)
		goto errout;

	/*
	 * We are done initializing the allocation block and the log
	 */
	brelse(bp);
	bp = NULL;

	/*
	 * Update the superblock and push the dirty metadata
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	sbupdate(ufsvfsp->vfs_vfs);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;
	bflush(ufsvfsp->vfs_dev);
	error = bfinval(ufsvfsp->vfs_dev, 1);
	if (error)
		goto errout;
	if (ufsvfsp->vfs_bufp->b_flags & B_ERROR) {
		error = EIO;
		goto errout;
	}

	/*
	 * Everything is safely on disk; update log space pointer in sb
	 */
	ufsvfsp->vfs_ulockfs.ul_sbowner = curthread;
	mutex_enter(&ufsvfsp->vfs_lock);
	fs->fs_logbno = (uint32_t)logbno;
	ufs_sbwrite(ufsvfsp);
	mutex_exit(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_ulockfs.ul_sbowner = (kthread_id_t)-1;

	/*
	 * Free the dummy inode
	 */
	rw_exit(&ip->i_contents);
	ufs_free_inode(ip);

	/* inform user of real log size */
	flp->nbytes_actual = tb;
	return (0);

errout:
	/*
	 * Free all resources
	 */
	if (bp)
		brelse(bp);
	if (logbno) {
		fs->fs_logbno = logbno;
		(void) lufs_free(ufsvfsp);
	}
	if (ip) {
		rw_exit(&ip->i_contents);
		ufs_free_inode(ip);
	}
	return (error);
}

/*
 * Disable logging
 */
int
lufs_disable(vnode_t *vp, struct fiolog *flp)
{
	int		error = 0;
	inode_t		*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs = ufsvfsp->vfs_fs;
	struct lockfs	lf;
	struct ulockfs	*ulp;

	flp->error = FIOLOG_ENONE;

	/*
	 * Logging is already disabled; done
	 */
	if (fs->fs_logbno == 0 || ufsvfsp->vfs_log == NULL)
		return (0);

	/*
	 * Readonly file system
	 */
	if (fs->fs_ronly) {
		flp->error = FIOLOG_EROFS;
		return (0);
	}

	/*
	 * File system must be write locked to disable logging
	 */
	error = ufs_fiolfss(vp, &lf);
	if (error) {
		return (error);
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		return (0);
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		return (0);
	}

	if (ufsvfsp->vfs_log == NULL || fs->fs_logbno == 0)
		goto errout;

	/*
	 * WE ARE COMMITTED TO DISABLING LOGGING PAST THIS POINT
	 */

	/*
	 * Disable logging:
	 * Suspend the reclaim thread and force the delete thread to exit.
	 *	When a nologging mount has completed there may still be
	 *	work for reclaim to do, so just suspend this thread until
	 *	it's [deadlock-] safe for it to continue.  The delete
	 *	thread won't be needed as ufs_iinactive() calls
	 *	ufs_delete() when logging is disabled.
	 * Freeze and drain reader ops.
	 *	Commit any outstanding reader transactions (ufs_flush).
	 *	Set the ``unmounted'' bit in the ufstrans struct.
	 *	If debug, remove metadata from matamap.
	 *	Disable matamap processing.
	 *	NULL the trans ops table.
	 *	Free all of the incore structs related to logging.
	 * Allow reader ops.
	 */
	ufs_thread_suspend(&ufsvfsp->vfs_reclaim);
	ufs_thread_exit(&ufsvfsp->vfs_delete);

	vfs_lock_wait(ufsvfsp->vfs_vfs);
	ulp = &ufsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
	atomic_add_long(&ufs_quiesce_pend, 1);
	(void) ufs_quiesce(ulp);

	(void) ufs_flush(ufsvfsp->vfs_vfs);

	TRANS_MATA_UMOUNT(ufsvfsp);
	ufsvfsp->vfs_domatamap = 0;

	/*
	 * Free all of the incore structs
	 * Acquire the ufs_scan_lock before de-linking the mtm data
	 * structure so that ufs_sync() and ufs_update(), which call
	 * ufs_scan_inodes(), stay away while we are in the process of
	 * enabling/disabling logging.
	 */
	mutex_enter(&ufs_scan_lock);
	(void) lufs_unsnarf(ufsvfsp);
	mutex_exit(&ufs_scan_lock);

	atomic_add_long(&ufs_quiesce_pend, -1);
	mutex_exit(&ulp->ul_lock);
	vfs_setmntopt(ufsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
	vfs_unlock(ufsvfsp->vfs_vfs);

	fs->fs_rolled = FS_ALL_ROLLED;
	ufsvfsp->vfs_nolog_si = 0;

	/*
	 * Free the log space and mark the superblock as FSACTIVE
	 */
	(void) lufs_free(ufsvfsp);

	/*
	 * Allow the reclaim thread to continue.
	 */
	ufs_thread_continue(&ufsvfsp->vfs_reclaim);

	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error)
		flp->error = FIOLOG_ENOULOCK;

	return (0);

errout:
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) ufs_fiolfs(vp, &lf, 1);
	return (error);
}

/*
 * Enable logging
 */
int
lufs_enable(struct vnode *vp, struct fiolog *flp, cred_t *cr)
{
	int		error;
	int		reclaim;
	inode_t		*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	ml_unit_t	*ul;
	struct lockfs	lf;
	struct ulockfs	*ulp;
	vfs_t		*vfsp = ufsvfsp->vfs_vfs;
	uint64_t	tmp_nbytes_actual;

	/*
	 * Check if logging is already enabled
	 */
	if (ufsvfsp->vfs_log) {
		flp->error = FIOLOG_ETRANS;
		/* for root ensure logging option is set */
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		return (0);
	}
	fs = ufsvfsp->vfs_fs;

	/*
	 * Come back here to recheck if we had to disable the log.
	 */
recheck:
	error = 0;
	reclaim = 0;
	flp->error = FIOLOG_ENONE;

	/*
	 * Adjust requested log size
	 */
	flp->nbytes_actual = flp->nbytes_requested;
	if (flp->nbytes_actual == 0) {
		tmp_nbytes_actual =
		    (((uint64_t)fs->fs_size) / ldl_divisor) << fs->fs_fshift;
		flp->nbytes_actual = (uint_t)MIN(tmp_nbytes_actual, INT_MAX);
	}
	flp->nbytes_actual = MAX(flp->nbytes_actual, ldl_minlogsize);
	flp->nbytes_actual = MIN(flp->nbytes_actual, ldl_maxlogsize);
	flp->nbytes_actual = blkroundup(fs, flp->nbytes_actual);

	/*
	 * Logging is enabled and the log is the right size; done
	 */
	ul = ufsvfsp->vfs_log;
	if (ul && fs->fs_logbno && (flp->nbytes_actual == ul->un_requestsize))
		return (0);

	/*
	 * Readonly file system
	 */
	if (fs->fs_ronly) {
		flp->error = FIOLOG_EROFS;
		return (0);
	}

	/*
	 * File system must be write locked to enable logging
	 */
	error = ufs_fiolfss(vp, &lf);
	if (error) {
		return (error);
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		return (0);
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		return (0);
	}

	/*
	 * Grab appropriate locks to synchronize with the rest
	 * of the system
	 */
	vfs_lock_wait(vfsp);
	ulp = &ufsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);

	/*
	 * File system must be fairly consistent to enable logging
	 */
	if (fs->fs_clean != FSLOG &&
	    fs->fs_clean != FSACTIVE &&
	    fs->fs_clean != FSSTABLE &&
	    fs->fs_clean != FSCLEAN) {
		flp->error = FIOLOG_ECLEAN;
		goto unlockout;
	}

	/*
	 * A write-locked file system is only active if there are
	 * open deleted files; so remember to set FS_RECLAIM later.
	 */
	if (fs->fs_clean == FSACTIVE)
		reclaim = FS_RECLAIM;

	/*
	 * Logging is already enabled; must be changing the log's size
	 */
	if (fs->fs_logbno && ufsvfsp->vfs_log) {
		/*
		 * Before we can disable logging, we must give up our
		 * lock.  As a consequence of unlocking and disabling the
		 * log, the fs structure may change.  Because of this, when
		 * disabling is complete, we will go back to recheck to
		 * repeat all of the checks that we performed to get to
		 * this point.  Disabling sets fs->fs_logbno to 0, so this
		 * will not put us into an infinite loop.
		 */
		mutex_exit(&ulp->ul_lock);
		vfs_unlock(vfsp);

		lf.lf_lock = LOCKFS_ULOCK;
		lf.lf_flags = 0;
		error = ufs_fiolfs(vp, &lf, 1);
		if (error) {
			flp->error = FIOLOG_ENOULOCK;
			return (0);
		}
		error = lufs_disable(vp, flp);
		if (error || (flp->error != FIOLOG_ENONE))
			return (0);
		goto recheck;
	}

	error = lufs_alloc(ufsvfsp, flp, cr);
	if (error)
		goto errout;

	/*
	 * Create all of the incore structs
	 */
	error = lufs_snarf(ufsvfsp, fs, 0);
	if (error)
		goto errout;

	/*
	 * DON'T ``GOTO ERROUT'' PAST THIS POINT
	 */

	/*
	 * Pretend we were just mounted with logging enabled
	 *		Get the ops vector
	 *		If debug, record metadata locations with log subsystem
	 *		Start the delete thread
	 *		Start the reclaim thread, if necessary
	 */
	vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);

	TRANS_DOMATAMAP(ufsvfsp);
	TRANS_MATA_MOUNT(ufsvfsp);
	TRANS_MATA_SI(ufsvfsp, fs);
	ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);
	if (fs->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
		fs->fs_reclaim &= ~FS_RECLAIM;
		fs->fs_reclaim |= FS_RECLAIMING;
		ufs_thread_start(&ufsvfsp->vfs_reclaim,
		    ufs_thread_reclaim, vfsp);
	} else
		fs->fs_reclaim |= reclaim;

	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vfsp);

	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = ufs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
		return (0);
	}

	/*
	 * There's nothing in the log yet (we've just allocated it)
	 * so directly write out the super block.
	 * Note, we have to force this sb out to disk
	 * (not just to the log) so that if we crash we know we are logging
	 */
	mutex_enter(&ufsvfsp->vfs_lock);
	fs->fs_clean = FSLOG;
	fs->fs_rolled = FS_NEED_ROLL; /* Mark the fs as unrolled */
	UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
	mutex_exit(&ufsvfsp->vfs_lock);

	return (0);

errout:
	/*
	 * Acquire the ufs_scan_lock before de-linking the mtm data
	 * structure so that ufs_sync() and ufs_update(), which call
	 * ufs_scan_inodes(), stay away while we are in the process of
	 * enabling/disabling logging.
	 */
	mutex_enter(&ufs_scan_lock);
	(void) lufs_unsnarf(ufsvfsp);
	mutex_exit(&ufs_scan_lock);

	(void) lufs_free(ufsvfsp);
unlockout:
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vfsp);

	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) ufs_fiolfs(vp, &lf, 1);
	return (error);
}

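/*
 * Read strategy: if committed deltas overlap the request, the data must
 * be pieced together from the master device and the log; otherwise the
 * read goes straight to the master.
 */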
void
lufs_read_strategy(ml_unit_t *ul, buf_t *bp)
{
	mt_map_t	*logmap	= ul->un_logmap;
	offset_t	mof	= ldbtob(bp->b_blkno);
	off_t		nb	= bp->b_bcount;
	mapentry_t	*age;
	char		*va;
	int		(*saviodone)();
	int		entire_range;

	/*
	 * get a linked list of overlapping deltas
	 * returns with &mtm->mtm_rwlock held
	 */
	entire_range = logmap_list_get(logmap, mof, nb, &age);

	/*
	 * no overlapping deltas were found; read master
	 */
	if (age == NULL) {
		rw_exit(&logmap->mtm_rwlock);
		if (ul->un_flags & LDL_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
			biodone(bp);
		} else {
			ul->un_ufsvfs->vfs_iotstamp = lbolt;
			logstats.ls_lreads.value.ui64++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_INBLK, 1);
		}
		return;
	}

	va = bp_mapin_common(bp, VM_SLEEP);
	/*
	 * if necessary, sync read the data from master
	 *	errors are returned in bp
	 */
	if (!entire_range) {
		saviodone = bp->b_iodone;
		bp->b_iodone = trans_not_done;
		logstats.ls_mreads.value.ui64++;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
		if (trans_not_wait(bp))
			ldl_seterror(ul, "Error reading master");
		bp->b_iodone = saviodone;
	}

	/*
	 * sync read the data from the log
	 *	errors are returned inline
	 */
	if (ldl_read(ul, va, mof, nb, age)) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}

	/*
	 * unlist the deltas
	 */
	logmap_list_put(logmap, age);

	/*
	 * all done
	 */
	if (ul->un_flags & LDL_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	biodone(bp);
}

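/*
 * Write strategy: writes covered by deltas are captured in the logmap
 * (logmap_add()/logmap_add_buf()) instead of going to the master;
 * everything else is written to the master device, through the
 * snapshot driver when one is active.
 */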
void
lufs_write_strategy(ml_unit_t *ul, buf_t *bp)
{
	offset_t	mof	= ldbtob(bp->b_blkno);
	off_t		nb	= bp->b_bcount;
	char		*va;
	mapentry_t	*me;

	ASSERT((nb & DEV_BMASK) == 0);
	ul->un_logmap->mtm_ref = 1;

	/*
	 * if there are deltas, move into log
	 */
	me = deltamap_remove(ul->un_deltamap, mof, nb);
	if (me) {

		va = bp_mapin_common(bp, VM_SLEEP);

		ASSERT(((ul->un_debug & MT_WRITE_CHECK) == 0) ||
		    (ul->un_matamap == NULL) ||
		    matamap_within(ul->un_matamap, mof, nb));

		/*
		 * move to logmap
		 */
		if (ufs_crb_enable) {
			logmap_add_buf(ul, va, mof, me,
			    bp->b_un.b_addr, nb);
		} else {
			logmap_add(ul, va, mof, me);
		}

		if (ul->un_flags & LDL_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
		}
		biodone(bp);
		return;
	}
	if (ul->un_flags & LDL_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bp);
		return;
	}

	/*
	 * Check that we are not updating metadata, or if so then via B_PHYS.
	 */
	ASSERT((ul->un_matamap == NULL) ||
	    !(matamap_overlap(ul->un_matamap, mof, nb) &&
	    ((bp->b_flags & B_PHYS) == 0)));

	ul->un_ufsvfs->vfs_iotstamp = lbolt;
	logstats.ls_lwrites.value.ui64++;

	/* If snapshots are enabled, write through the snapshot driver */
	if (ul->un_ufsvfs->vfs_snapshot)
		fssnap_strategy(&ul->un_ufsvfs->vfs_snapshot, bp);
	else
		(void) bdev_strategy(bp);

	lwp_stat_update(LWP_STAT_OUBLK, 1);
}

void
lufs_strategy(ml_unit_t *ul, buf_t *bp)
{
	if (bp->b_flags & B_READ)
		lufs_read_strategy(ul, bp);
	else
		lufs_write_strategy(ul, bp);
}

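/*
 * kstat update routine for the delta/roll statistics.  The logging code
 * keeps the raw counters in delta_stats[]/roll_stats[], indexed by delta
 * type; this copies them into (or, on KSTAT_WRITE, back out of) the
 * named kstat structure.
 */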
/* ARGSUSED */
static int
delta_stats_update(kstat_t *ksp, int rw)
{
	if (rw == KSTAT_WRITE) {
		delta_stats[DT_SB] = dkstats.ds_superblock_deltas.value.ui64;
		delta_stats[DT_CG] = dkstats.ds_bitmap_deltas.value.ui64;
		delta_stats[DT_SI] = dkstats.ds_suminfo_deltas.value.ui64;
		delta_stats[DT_AB] = dkstats.ds_allocblk_deltas.value.ui64;
		delta_stats[DT_ABZERO] = dkstats.ds_ab0_deltas.value.ui64;
		delta_stats[DT_DIR] = dkstats.ds_dir_deltas.value.ui64;
		delta_stats[DT_INODE] = dkstats.ds_inode_deltas.value.ui64;
		delta_stats[DT_FBI] = dkstats.ds_fbiwrite_deltas.value.ui64;
		delta_stats[DT_QR] = dkstats.ds_quota_deltas.value.ui64;
		delta_stats[DT_SHAD] = dkstats.ds_shadow_deltas.value.ui64;

		roll_stats[DT_SB] = dkstats.ds_superblock_rolled.value.ui64;
		roll_stats[DT_CG] = dkstats.ds_bitmap_rolled.value.ui64;
		roll_stats[DT_SI] = dkstats.ds_suminfo_rolled.value.ui64;
		roll_stats[DT_AB] = dkstats.ds_allocblk_rolled.value.ui64;
		roll_stats[DT_ABZERO] = dkstats.ds_ab0_rolled.value.ui64;
		roll_stats[DT_DIR] = dkstats.ds_dir_rolled.value.ui64;
		roll_stats[DT_INODE] = dkstats.ds_inode_rolled.value.ui64;
		roll_stats[DT_FBI] = dkstats.ds_fbiwrite_rolled.value.ui64;
		roll_stats[DT_QR] = dkstats.ds_quota_rolled.value.ui64;
		roll_stats[DT_SHAD] = dkstats.ds_shadow_rolled.value.ui64;
	} else {
		dkstats.ds_superblock_deltas.value.ui64 = delta_stats[DT_SB];
		dkstats.ds_bitmap_deltas.value.ui64 = delta_stats[DT_CG];
		dkstats.ds_suminfo_deltas.value.ui64 = delta_stats[DT_SI];
		dkstats.ds_allocblk_deltas.value.ui64 = delta_stats[DT_AB];
		dkstats.ds_ab0_deltas.value.ui64 = delta_stats[DT_ABZERO];
		dkstats.ds_dir_deltas.value.ui64 = delta_stats[DT_DIR];
		dkstats.ds_inode_deltas.value.ui64 = delta_stats[DT_INODE];
		dkstats.ds_fbiwrite_deltas.value.ui64 = delta_stats[DT_FBI];
		dkstats.ds_quota_deltas.value.ui64 = delta_stats[DT_QR];
		dkstats.ds_shadow_deltas.value.ui64 = delta_stats[DT_SHAD];

		dkstats.ds_superblock_rolled.value.ui64 = roll_stats[DT_SB];
		dkstats.ds_bitmap_rolled.value.ui64 = roll_stats[DT_CG];
		dkstats.ds_suminfo_rolled.value.ui64 = roll_stats[DT_SI];
		dkstats.ds_allocblk_rolled.value.ui64 = roll_stats[DT_AB];
		dkstats.ds_ab0_rolled.value.ui64 = roll_stats[DT_ABZERO];
		dkstats.ds_dir_rolled.value.ui64 = roll_stats[DT_DIR];
		dkstats.ds_inode_rolled.value.ui64 = roll_stats[DT_INODE];
		dkstats.ds_fbiwrite_rolled.value.ui64 = roll_stats[DT_FBI];
		dkstats.ds_quota_rolled.value.ui64 = roll_stats[DT_QR];
		dkstats.ds_shadow_rolled.value.ui64 = roll_stats[DT_SHAD];
	}
	return (0);
}

extern size_t ufs_crb_limit;
extern int ufs_max_crb_divisor;

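/*
 * One-time initialization of the log layer: kmem caches, the top layer
 * (_init_top()), the strategy hook used by bio, the log kstats, and the
 * system-wide crb memory limit.
 */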
void
lufs_init(void)
{
	kstat_t *ksp;

	/* Create kmem caches */
	lufs_sv = kmem_cache_create("lufs_save", sizeof (lufs_save_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);
	lufs_bp = kmem_cache_create("lufs_bufs", sizeof (lufs_buf_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&log_mutex, NULL, MUTEX_DEFAULT, NULL);

	_init_top();

	if (&bio_lufs_strategy != NULL)
		bio_lufs_strategy = (void (*) (void *, buf_t *)) lufs_strategy;

	/*
	 * Initialise general logging and delta kstats
	 */
	ksp = kstat_create("ufs_log", 0, "logstats", "ufs", KSTAT_TYPE_NAMED,
	    sizeof (logstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &logstats;
		kstat_install(ksp);
	}

	ksp = kstat_create("ufs_log", 0, "deltastats", "ufs", KSTAT_TYPE_NAMED,
	    sizeof (dkstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *) &dkstats;
		ksp->ks_update = delta_stats_update;
		kstat_install(ksp);
	}

	/*
	 * Set up the maximum amount of kmem that the crbs (system wide)
	 * can use.
	 */
	ufs_crb_limit = kmem_maxavail() / ufs_max_crb_divisor;
}
1379